1 // SPDX-License-Identifier: MIT
3 * Copyright 2022 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dcn32_fpu.h"
27 #include "dcn32/dcn32_resource.h"
28 #include "dcn20/dcn20_resource.h"
29 #include "display_mode_vba_util_32.h"
30 #include "dml/dcn32/display_mode_vba_32.h"
31 // We need this include for the WATERMARKS_* defines
32 #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
33 #include "dcn30/dcn30_resource.h"
35 #include "dc_state_priv.h"
37 #define DC_LOGGER_INIT(logger)
39 static const struct subvp_high_refresh_list subvp_high_refresh_list = {
43 {.width = 3840, .height = 2160, },
44 {.width = 3440, .height = 1440, },
45 {.width = 2560, .height = 1440, },
46 {.width = 1920, .height = 1080, }},
49 static const struct subvp_active_margin_list subvp_active_margin_list = {
53 {.width = 2560, .height = 1440, },
54 {.width = 1920, .height = 1080, }},
57 struct _vcs_dpi_ip_params_st dcn3_2_ip = {
59 .gpuvm_max_page_table_levels = 4,
61 .rob_buffer_size_kbytes = 128,
62 .det_buffer_size_kbytes = DCN3_2_DEFAULT_DET_SIZE,
63 .config_return_buffer_size_in_kbytes = 1280,
64 .compressed_buffer_segment_size_in_kbytes = 64,
65 .meta_fifo_size_in_kentries = 22,
66 .zero_size_buffer_entries = 512,
67 .compbuf_reserved_space_64b = 256,
68 .compbuf_reserved_space_zs = 64,
69 .dpp_output_buffer_pixels = 2560,
70 .opp_output_buffer_lines = 1,
71 .pixel_chunk_size_kbytes = 8,
72 .alpha_pixel_chunk_size_kbytes = 4,
73 .min_pixel_chunk_size_bytes = 1024,
74 .dcc_meta_buffer_size_bytes = 6272,
75 .meta_chunk_size_kbytes = 2,
76 .min_meta_chunk_size_bytes = 256,
77 .writeback_chunk_size_kbytes = 8,
78 .ptoi_supported = false,
80 .maximum_dsc_bits_per_component = 12,
81 .maximum_pixels_per_line_per_dsc_unit = 6016,
82 .dsc422_native_support = true,
83 .is_line_buffer_bpp_fixed = true,
84 .line_buffer_fixed_bpp = 57,
85 .line_buffer_size_bits = 1171920,
86 .max_line_buffer_lines = 32,
87 .writeback_interface_buffer_size_kbytes = 90,
90 .max_num_hdmi_frl_outputs = 1,
92 .max_dchub_pscl_bw_pix_per_clk = 4,
93 .max_pscl_lb_bw_pix_per_clk = 2,
94 .max_lb_vscl_bw_pix_per_clk = 4,
95 .max_vscl_hscl_bw_pix_per_clk = 4,
100 .dpte_buffer_size_in_pte_reqs_luma = 64,
101 .dpte_buffer_size_in_pte_reqs_chroma = 34,
102 .dispclk_ramp_margin_percent = 1,
103 .max_inter_dcn_tile_repeaters = 8,
104 .cursor_buffer_size = 16,
105 .cursor_chunk_size = 2,
106 .writeback_line_buffer_buffer_size = 0,
107 .writeback_min_hscl_ratio = 1,
108 .writeback_min_vscl_ratio = 1,
109 .writeback_max_hscl_ratio = 1,
110 .writeback_max_vscl_ratio = 1,
111 .writeback_max_hscl_taps = 1,
112 .writeback_max_vscl_taps = 1,
113 .dppclk_delay_subtotal = 47,
114 .dppclk_delay_scl = 50,
115 .dppclk_delay_scl_lb_only = 16,
116 .dppclk_delay_cnvc_formatter = 28,
117 .dppclk_delay_cnvc_cursor = 6,
118 .dispclk_delay_subtotal = 125,
119 .dynamic_metadata_vm_enabled = false,
120 .odm_combine_4to1_supported = false,
121 .dcc_supported = true,
122 .max_num_dp2p0_outputs = 2,
123 .max_num_dp2p0_streams = 4,
126 struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
130 .dcfclk_mhz = 1564.0,
131 .fabricclk_mhz = 2500.0,
132 .dispclk_mhz = 2150.0,
133 .dppclk_mhz = 2150.0,
135 .phyclk_d18_mhz = 667.0,
136 .phyclk_d32_mhz = 625.0,
137 .socclk_mhz = 1200.0,
138 .dscclk_mhz = 716.667,
139 .dram_speed_mts = 18000.0,
140 .dtbclk_mhz = 1564.0,
144 .sr_exit_time_us = 42.97,
145 .sr_enter_plus_exit_time_us = 49.94,
146 .sr_exit_z8_time_us = 285.0,
147 .sr_enter_plus_exit_z8_time_us = 320,
148 .writeback_latency_us = 12.0,
149 .round_trip_ping_latency_dcfclk_cycles = 263,
150 .urgent_latency_pixel_data_only_us = 4.0,
151 .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
152 .urgent_latency_vm_data_only_us = 4.0,
153 .fclk_change_latency_us = 25,
154 .usr_retraining_latency_us = 2,
156 .mall_allocated_for_dcn_mbytes = 64,
157 .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
158 .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
159 .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
160 .pct_ideal_sdp_bw_after_urgent = 90.0,
161 .pct_ideal_fabric_bw_after_urgent = 67.0,
162 .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
163 .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
164 .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented
165 .pct_ideal_dram_bw_after_urgent_strobe = 67.0,
166 .max_avg_sdp_bw_use_normal_percent = 80.0,
167 .max_avg_fabric_bw_use_normal_percent = 60.0,
168 .max_avg_dram_bw_use_normal_strobe_percent = 50.0,
169 .max_avg_dram_bw_use_normal_percent = 15.0,
171 .dram_channel_width_bytes = 2,
172 .fabric_datapath_to_dcn_data_return_bytes = 64,
173 .return_bus_width_bytes = 64,
174 .downspread_percent = 0.38,
175 .dcn_downspread_percent = 0.5,
176 .dram_clock_change_latency_us = 400,
177 .dispclk_dppclk_vco_speed_mhz = 4300.0,
178 .do_urgent_latency_adjustment = true,
179 .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
180 .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
183 void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
186 double pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dram_clock_change_latency_us;
187 double fclk_change_latency_us = clk_mgr->base.ctx->dc->dml.soc.fclk_change_latency_us;
188 double sr_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_exit_time_us;
189 double sr_enter_plus_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
190 /* For min clocks, use the values reported by PM FW and report those as min */
191 uint16_t min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz;
192 uint16_t min_dcfclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz;
193 uint16_t setb_min_uclk_mhz = min_uclk_mhz;
194 uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->base.ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;
196 dc_assert_fp_enabled();
198 /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
199 if (dcfclk_mhz_for_the_second_state)
200 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
202 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz;
204 if (clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz)
205 setb_min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz;
207 /* Set A - Normal - default values */
208 clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].valid = true;
209 clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
210 clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
211 clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
212 clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
213 clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
214 clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
215 clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
216 clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
217 clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
219 /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
220 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].valid = true;
221 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
222 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
223 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
224 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
225 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
226 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
227 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
228 clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
230 /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
231 /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
232 if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
233 clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
234 clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
235 clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
236 clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
237 clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
238 clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
239 clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
240 clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
241 clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
242 clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
243 clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16;
244 clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
245 clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16;
246 clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
247 clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16;
248 clk_mgr->base.bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
249 clk_mgr->base.bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[3].memclk_mhz * 16;
250 clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
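/* Note: the table above pairs each memclk DPM level with the dummy p-state
 * latency used for Set C; the lowest memclk entry gets the largest latency
 * (50 us) and higher levels get progressively smaller values (9/8/5 us).
 */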
252 /* Set D - MALL - SR enter and exit times are specific to MALL, TBD after bringup or a later phase; for now use DRAM values / 2 */
253 /* For MALL, DRAM clock change latency is N/A; for watermark calculations use the lowest dummy P-state latency */
254 clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
255 clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
256 clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
257 clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD
258 clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD
259 clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
260 clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
261 clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
262 clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
263 clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
267 * Finds dummy_latency_index when MCLK switching using firmware based
268 * vblank stretch is enabled. This function will iterate through the
269 * table of dummy pstate latencies until the lowest value that allows
270 * dm_allow_self_refresh_and_mclk_switch to happen is found
272 int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
273 struct dc_state *context,
274 display_e2e_pipe_params_st *pipes,
278 const int max_latency_table_entries = 4;
279 struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
280 int dummy_latency_index = 0;
281 enum clock_change_support temp_clock_change_support = vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
283 dc_assert_fp_enabled();
285 while (dummy_latency_index < max_latency_table_entries) {
286 if (temp_clock_change_support != dm_dram_clock_change_unsupported)
287 vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
288 context->bw_ctx.dml.soc.dram_clock_change_latency_us =
289 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
290 dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
292 /* for subvp + DRR case, if subvp pipes are still present we support pstate */
293 if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
294 dcn32_subvp_in_use(dc, context))
295 vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
297 if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
298 vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
301 dummy_latency_index++;
304 if (dummy_latency_index == max_latency_table_entries) {
305 ASSERT(dummy_latency_index != max_latency_table_entries);
306 /* If the execution gets here, it means dummy p_states are
307 * not possible. This should never happen and would mean
308 * something is severely wrong.
309 * Here we reset dummy_latency_index to 3, because it is
310 * better to have underflows than system crashes.
312 dummy_latency_index = max_latency_table_entries - 1;
315 return dummy_latency_index;
319 * dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
320 * and populate pipe_ctx with those params.
321 * @dc: [in] current dc state
322 * @context: [in] new dc state
323 * @pipes: [in] DML pipe params array
324 * @pipe_cnt: [in] DML pipe count
326 * This function must be called AFTER the phantom pipes are added to context
327 * and run through DML (so that the DLG params for the phantom pipes can be
328 * populated), and BEFORE we program the timing for the phantom pipes.
330 void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
331 struct dc_state *context,
332 display_e2e_pipe_params_st *pipes,
335 uint32_t i, pipe_idx;
337 dc_assert_fp_enabled();
339 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
340 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
345 if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
346 pipes[pipe_idx].pipe.dest.vstartup_start =
347 get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
348 pipes[pipe_idx].pipe.dest.vupdate_offset =
349 get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
350 pipes[pipe_idx].pipe.dest.vupdate_width =
351 get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
352 pipes[pipe_idx].pipe.dest.vready_offset =
353 get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
354 pipe->pipe_dlg_param = pipes[pipe_idx].pipe.dest;
360 static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *entry)
362 float memory_bw_kbytes_sec;
363 float fabric_bw_kbytes_sec;
364 float sdp_bw_kbytes_sec;
365 float limiting_bw_kbytes_sec;
367 memory_bw_kbytes_sec = entry->dram_speed_mts *
368 dcn3_2_soc.num_chans *
369 dcn3_2_soc.dram_channel_width_bytes *
370 ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100);
372 fabric_bw_kbytes_sec = entry->fabricclk_mhz *
373 dcn3_2_soc.return_bus_width_bytes *
374 ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100);
376 sdp_bw_kbytes_sec = entry->dcfclk_mhz *
377 dcn3_2_soc.return_bus_width_bytes *
378 ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100);
380 limiting_bw_kbytes_sec = memory_bw_kbytes_sec;
382 if (fabric_bw_kbytes_sec < limiting_bw_kbytes_sec)
383 limiting_bw_kbytes_sec = fabric_bw_kbytes_sec;
385 if (sdp_bw_kbytes_sec < limiting_bw_kbytes_sec)
386 limiting_bw_kbytes_sec = sdp_bw_kbytes_sec;
388 return limiting_bw_kbytes_sec;
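/* Illustrative note (the numbers below are assumed for the example, not taken
 * from any product config): get_optimal_ntuple() back-fills the missing
 * members of a (dcfclk, fabricclk, dram_speed) tuple so that SDP, fabric and
 * DRAM all deliver the same derated bandwidth used above. E.g. with
 * dcfclk = 1000 MHz, a 64-byte return bus and the 90% SDP derate, the target
 * bandwidth is 1000 * 64 * 0.90 = 57600; assuming 8 channels of 2 bytes at
 * the 20% DRAM derate, that maps to fabricclk ~= 57600 / (64 * 0.67)
 * ~= 1343 MHz and dram_speed = 57600 / (8 * 2 * 0.2) = 18000 MT/s.
 */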
391 static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry)
393 if (entry->dcfclk_mhz > 0) {
394 float bw_on_sdp = entry->dcfclk_mhz * dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100);
396 entry->fabricclk_mhz = bw_on_sdp / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100));
397 entry->dram_speed_mts = bw_on_sdp / (dcn3_2_soc.num_chans *
398 dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100));
399 } else if (entry->fabricclk_mhz > 0) {
400 float bw_on_fabric = entry->fabricclk_mhz * dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100);
402 entry->dcfclk_mhz = bw_on_fabric / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100));
403 entry->dram_speed_mts = bw_on_fabric / (dcn3_2_soc.num_chans *
404 dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100));
405 } else if (entry->dram_speed_mts > 0) {
406 float bw_on_dram = entry->dram_speed_mts * dcn3_2_soc.num_chans *
407 dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100);
409 entry->fabricclk_mhz = bw_on_dram / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100));
410 entry->dcfclk_mhz = bw_on_dram / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100));
414 static void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
415 unsigned int *num_entries,
416 struct _vcs_dpi_voltage_scaling_st *entry)
421 dc_assert_fp_enabled();
423 if (*num_entries == 0) {
427 while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) {
429 if (index >= *num_entries)
433 for (i = *num_entries; i > index; i--)
434 table[i] = table[i - 1];
436 table[index] = *entry;
442 * dcn32_set_phantom_stream_timing - Set timing params for the phantom stream
443 * @dc: current dc state
444 * @context: new dc state
445 * @ref_pipe: Main pipe for the phantom stream
446 * @phantom_stream: target phantom stream state
447 * @pipes: DML pipe params
448 * @pipe_cnt: number of DML pipes
449 * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
451 * Set timing params of the phantom stream based on calculated output from DML.
452 * This function first gets the DML pipe index using the DC pipe index, then
453 * calls into DML (get_subviewport_lines_needed_in_mall) to get the number of
454 * lines required for SubVP MCLK switching and assigns to the phantom stream
457 * - The number of SubVP lines calculated in DML does not take into account
458 * FW processing delays and required pstate allow width, so we must include
461 * - Set phantom backporch = vstartup of main pipe
463 void dcn32_set_phantom_stream_timing(struct dc *dc,
464 struct dc_state *context,
465 struct pipe_ctx *ref_pipe,
466 struct dc_stream_state *phantom_stream,
467 display_e2e_pipe_params_st *pipes,
468 unsigned int pipe_cnt,
469 unsigned int dc_pipe_idx)
471 unsigned int i, pipe_idx;
472 struct pipe_ctx *pipe;
473 uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
474 unsigned int num_dpp;
475 unsigned int vlevel = context->bw_ctx.dml.vba.VoltageLevel;
476 unsigned int dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
477 unsigned int socclk = context->bw_ctx.dml.vba.SOCCLKPerState[vlevel];
478 struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
479 struct dc_stream_state *main_stream = ref_pipe->stream;
481 dc_assert_fp_enabled();
483 // Find DML pipe index (pipe_idx) using dc_pipe_idx
484 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
485 pipe = &context->res_ctx.pipe_ctx[i];
490 if (i == dc_pipe_idx)
496 // Calculate lines required for pstate allow width and FW processing delays
497 pstate_width_fw_delay_lines = ((double)(dc->caps.subvp_fw_processing_delay_us +
498 dc->caps.subvp_pstate_allow_width_us) / 1000000) *
499 (ref_pipe->stream->timing.pix_clk_100hz * 100) /
500 (double)ref_pipe->stream->timing.h_total;
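/* Illustrative example with assumed timing values: a combined FW processing +
 * p-state allow width of 90 us on a 594 MHz pixel clock with h_total = 4400
 * adds (90 / 1e6) * 594e6 / 4400 ~= 12.2 lines of margin.
 */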
502 // Update clks_cfg for calling into recalculate
503 pipes[0].clks_cfg.voltage = vlevel;
504 pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
505 pipes[0].clks_cfg.socclk_mhz = socclk;
507 // DML calculation for MALL region doesn't take into account FW delay
508 // and required pstate allow width for multi-display cases
509 /* Add 16 lines margin to the MALL REGION because SUB_VP_START_LINE must be aligned
510 * to 2 swaths (i.e. 16 lines)
512 phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
513 pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
515 // W/A for DCC corruption with certain high resolution timings.
516 // Determine if pipe split is used. If so, add meta_row_height to the phantom vactive.
517 num_dpp = vba->NoOfDPP[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]];
518 phantom_vactive += num_dpp > 1 ? vba->meta_row_height[vba->pipe_plane[pipe_idx]] : 0;
520 /* dc->debug.subvp_extra_lines is 0 by default */
521 phantom_vactive += dc->debug.subvp_extra_lines;
523 // For backporch of phantom pipe, use vstartup of the main pipe
524 phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
526 phantom_stream->dst.y = 0;
527 phantom_stream->dst.height = phantom_vactive;
528 /* When scaling, DML provides the end-to-end required number of lines for MALL.
529 * dst.height is always correct for this case, but src.height is not, which causes a
530 * delta between main and phantom pipe scaling outputs. Need to adjust src.height on
531 * phantom for this case.
533 phantom_stream->src.y = 0;
534 phantom_stream->src.height = (double)phantom_vactive * (double)main_stream->src.height / (double)main_stream->dst.height;
536 phantom_stream->timing.v_addressable = phantom_vactive;
537 phantom_stream->timing.v_front_porch = 1;
538 phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
539 phantom_stream->timing.v_front_porch +
540 phantom_stream->timing.v_sync_width +
542 phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
546 * dcn32_get_num_free_pipes - Calculate number of free pipes
547 * @dc: current dc state
548 * @context: new dc state
550 * This function assumes that a "used" pipe is a pipe that has
551 * both a stream and a plane assigned to it.
553 * Return: Number of free pipes available in the context
555 static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context)
558 unsigned int free_pipes = 0;
559 unsigned int num_pipes = 0;
561 for (i = 0; i < dc->res_pool->pipe_count; i++) {
562 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
564 if (pipe->stream && !pipe->top_pipe) {
567 pipe = pipe->bottom_pipe;
572 free_pipes = dc->res_pool->pipe_count - num_pipes;
577 * dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP.
578 * @dc: current dc state
579 * @context: new dc state
580 * @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
582 * We enter this function if we are Sub-VP capable (i.e. enough pipes available)
583 * and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
584 * we are forcing SubVP P-State switching on the current config.
586 * The number of pipes used for the chosen surface must be less than or equal to the
587 * number of free pipes available.
589 * In general we choose surfaces with the longest frame time first (better for SubVP + VBLANK).
590 * For multi-display cases the ActiveDRAMClockChangeMargin doesn't provide enough info on its own
591 * for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
592 * support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
594 * Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false.
596 static bool dcn32_assign_subvp_pipe(struct dc *dc,
597 struct dc_state *context,
600 unsigned int i, pipe_idx;
601 unsigned int max_frame_time = 0;
602 bool valid_assignment_found = false;
603 unsigned int free_pipes = dcn32_get_num_free_pipes(dc, context);
604 struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
606 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
607 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
608 unsigned int num_pipes = 0;
609 unsigned int refresh_rate = 0;
615 refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
616 pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
617 / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
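/* Example with an assumed 4k60 timing: pix_clk_100hz = 5940000, h_total = 4400,
 * v_total = 2250 gives (594e6 + 9.9e6 - 1) / 9.9e6, which truncates to 60;
 * the added (v_total * h_total - 1) term makes non-exact rates round up.
 */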
618 /* SubVP pipe candidate requirements:
619 * - Refresh rate < 120 Hz
620 * - Not able to switch in vactive naturally (switching in active means the
621 * DET provides enough buffer to hide the P-State switch latency -- trying
622 * to combine this with SubVP can cause issues with the scheduling).
625 if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
626 !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
627 (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
628 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
629 (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
630 !pipe->plane_state->address.tmz_surface &&
631 (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
632 (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
633 dcn32_allow_subvp_with_active_margin(pipe)))) {
636 pipe = pipe->bottom_pipe;
639 pipe = &context->res_ctx.pipe_ctx[i];
640 if (num_pipes <= free_pipes) {
641 struct dc_stream_state *stream = pipe->stream;
642 unsigned int frame_us = (stream->timing.v_total * stream->timing.h_total /
643 (double)(stream->timing.pix_clk_100hz * 100)) * 1000000;
644 if (frame_us > max_frame_time) {
646 max_frame_time = frame_us;
647 valid_assignment_found = true;
653 return valid_assignment_found;
657 * dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP.
658 * @dc: current dc state
659 * @context: new dc state
661 * This function returns true if there are enough free pipes
662 * to create the required phantom pipes for any given stream
663 * (that does not already have phantom pipe assigned).
665 * e.g. For a 2 stream config where the first stream uses one
666 * pipe and the second stream uses 2 pipes (i.e. pipe split),
667 * this function will return true because there is 1 remaining
668 * pipe which can be used as the phantom pipe for the non-pipe-split stream.
672 * True if there are enough free pipes to assign phantom pipes to at least one
673 * stream that does not already have phantom pipes assigned. Otherwise false.
675 static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context)
677 unsigned int i, split_cnt, free_pipes;
678 unsigned int min_pipe_split = dc->res_pool->pipe_count + 1; // init as max number of pipes + 1
679 bool subvp_possible = false;
681 for (i = 0; i < dc->res_pool->pipe_count; i++) {
682 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
684 // Find the minimum pipe split count for non-SubVP pipes
685 if (resource_is_pipe_type(pipe, OPP_HEAD) &&
686 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) {
690 pipe = pipe->bottom_pipe;
693 if (split_cnt < min_pipe_split)
694 min_pipe_split = split_cnt;
698 free_pipes = dcn32_get_num_free_pipes(dc, context);
700 // SubVP only possible if at least one pipe is being used (i.e. free_pipes
701 // should not be equal to the pipe_count)
702 if (free_pipes >= min_pipe_split && free_pipes < dc->res_pool->pipe_count)
703 subvp_possible = true;
705 return subvp_possible;
709 * subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable
710 * @dc: current dc state
711 * @context: new dc state
713 * High level algorithm:
714 * 1. Find longest microschedule length (in us) between the two SubVP pipes
715 * 2. Check if the worst case overlap (VBLANK in middle of ACTIVE) for both
716 * pipes still allows for the maximum microschedule to fit in the active
717 * region for both pipes.
719 * Return: True if the SubVP + SubVP config is schedulable, false otherwise
721 static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
723 struct pipe_ctx *subvp_pipes[2];
724 struct dc_stream_state *phantom = NULL;
725 uint32_t microschedule_lines = 0;
728 uint32_t max_microschedule_us = 0;
729 int32_t vactive1_us, vactive2_us, vblank1_us, vblank2_us;
731 for (i = 0; i < dc->res_pool->pipe_count; i++) {
732 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
733 uint32_t time_us = 0;
735 /* Loop to calculate the maximum microschedule time between the two SubVP pipes,
736 * and also to store the two main SubVP pipe pointers in subvp_pipes[2].
738 if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
739 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
740 phantom = dc_state_get_paired_subvp_stream(context, pipe->stream);
741 microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
742 phantom->timing.v_addressable;
744 // Round up when calculating microschedule time (+ 1 at the end)
745 time_us = (microschedule_lines * phantom->timing.h_total) /
746 (double)(phantom->timing.pix_clk_100hz * 100) * 1000000 +
747 dc->caps.subvp_prefetch_end_to_mall_start_us +
748 dc->caps.subvp_fw_processing_delay_us + 1;
749 if (time_us > max_microschedule_us)
750 max_microschedule_us = time_us;
752 subvp_pipes[index] = pipe;
755 // Maximum 2 SubVP pipes
760 vactive1_us = ((subvp_pipes[0]->stream->timing.v_addressable * subvp_pipes[0]->stream->timing.h_total) /
761 (double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
762 vactive2_us = ((subvp_pipes[1]->stream->timing.v_addressable * subvp_pipes[1]->stream->timing.h_total) /
763 (double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000;
764 vblank1_us = (((subvp_pipes[0]->stream->timing.v_total - subvp_pipes[0]->stream->timing.v_addressable) *
765 subvp_pipes[0]->stream->timing.h_total) /
766 (double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
767 vblank2_us = (((subvp_pipes[1]->stream->timing.v_total - subvp_pipes[1]->stream->timing.v_addressable) *
768 subvp_pipes[1]->stream->timing.h_total) /
769 (double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000;
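/* Worked example with assumed round numbers: two 60 Hz SubVP displays with
 * ~15.6 ms of VACTIVE and ~1.1 ms of VBLANK each leave
 * (15600 - 1100) / 2 = 7250 us per display, which comfortably covers a
 * worst-case microschedule of a few ms even when one display's VBLANK lands
 * in the middle of the other's VACTIVE.
 */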
771 if ((vactive1_us - vblank2_us) / 2 > max_microschedule_us &&
772 (vactive2_us - vblank1_us) / 2 > max_microschedule_us)
779 * subvp_drr_schedulable() - Determine if SubVP + DRR config is schedulable
780 * @dc: current dc state
781 * @context: new dc state
783 * High level algorithm:
784 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
785 * 2. Determine the frame time for the DRR display when adding required margin for MCLK switching
786 * (the margin is equal to the MALL region + DRR margin (500us))
787 * 3. If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, Stretched DRR frame))
788 * then report the configuration as supported
790 * Return: True if the SubVP + DRR config is schedulable, false otherwise
792 static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
794 bool schedulable = false;
796 struct pipe_ctx *pipe = NULL;
797 struct pipe_ctx *drr_pipe = NULL;
798 struct dc_crtc_timing *main_timing = NULL;
799 struct dc_crtc_timing *phantom_timing = NULL;
800 struct dc_crtc_timing *drr_timing = NULL;
801 int16_t prefetch_us = 0;
802 int16_t mall_region_us = 0;
803 int16_t drr_frame_us = 0; // nominal frame time
804 int16_t subvp_active_us = 0;
805 int16_t stretched_drr_us = 0;
806 int16_t drr_stretched_vblank_us = 0;
807 int16_t max_vblank_mallregion = 0;
808 struct dc_stream_state *phantom_stream;
809 bool subvp_found = false;
810 bool drr_found = false;
813 for (i = 0; i < dc->res_pool->pipe_count; i++) {
814 pipe = &context->res_ctx.pipe_ctx[i];
816 // We check for master pipe, but it shouldn't matter since we only need
817 // the pipe for timing info (the stream should be the same for any pipe splits)
818 if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
819 !resource_is_pipe_type(pipe, DPP_PIPE))
822 // Find the SubVP pipe
823 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
830 for (i = 0; i < dc->res_pool->pipe_count; i++) {
831 drr_pipe = &context->res_ctx.pipe_ctx[i];
833 // We check for master pipe only
834 if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) ||
835 !resource_is_pipe_type(drr_pipe, DPP_PIPE))
838 if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
839 (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) {
845 if (subvp_found && drr_found) {
846 phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream);
847 main_timing = &pipe->stream->timing;
848 phantom_timing = &phantom_stream->timing;
849 drr_timing = &drr_pipe->stream->timing;
850 prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
851 (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
852 dc->caps.subvp_prefetch_end_to_mall_start_us;
853 subvp_active_us = main_timing->v_addressable * main_timing->h_total /
854 (double)(main_timing->pix_clk_100hz * 100) * 1000000;
855 drr_frame_us = drr_timing->v_total * drr_timing->h_total /
856 (double)(drr_timing->pix_clk_100hz * 100) * 1000000;
857 // P-State allow width and FW delays are already included in phantom_timing->v_addressable
858 mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
859 (double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
860 stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
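/* Example with assumed numbers: a 120 Hz DRR panel has a nominal frame of
 * ~8333 us; with a ~500 us MALL region and the 500 us margin, the stretched
 * frame is ~9333 us (~107 Hz). For the schedulability check further below to
 * pass, that stretched frame must still be shorter than the longest frame the
 * panel supports (e.g. 20833 us at a 48 Hz floor).
 */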
861 drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
862 (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
863 max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
866 /* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
867 * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
868 * for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
869 * and the max of (VBLANK blanking time, MALL region)).
871 if (stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
872 subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
880 * subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable
881 * @dc: current dc state
882 * @context: new dc state
884 * High level algorithm:
885 * 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe
886 * 2. If (SubVP Active - Prefetch > Vblank Frame Time + max(MALL region, Vblank blanking time))
887 * then report the configuration as supported
888 * 3. If the VBLANK display is DRR, then take the DRR static schedulability path
890 * Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
892 static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
894 struct pipe_ctx *pipe = NULL;
895 struct pipe_ctx *subvp_pipe = NULL;
897 bool schedulable = false;
899 uint8_t vblank_index = 0;
900 uint16_t prefetch_us = 0;
901 uint16_t mall_region_us = 0;
902 uint16_t vblank_frame_us = 0;
903 uint16_t subvp_active_us = 0;
904 uint16_t vblank_blank_us = 0;
905 uint16_t max_vblank_mallregion = 0;
906 struct dc_crtc_timing *main_timing = NULL;
907 struct dc_crtc_timing *phantom_timing = NULL;
908 struct dc_crtc_timing *vblank_timing = NULL;
909 struct dc_stream_state *phantom_stream;
910 enum mall_stream_type pipe_mall_type;
912 /* For SubVP + VBLANK/DRR cases, we assume there can only be
913 * a single VBLANK/DRR display. If DML outputs that SubVP + VBLANK
914 * is supported, it is either a single VBLANK case or two VBLANK
915 * displays which are synchronized (in which case they have identical timings).
918 for (i = 0; i < dc->res_pool->pipe_count; i++) {
919 pipe = &context->res_ctx.pipe_ctx[i];
920 pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
922 // We check for master pipe, but it shouldn't matter since we only need
923 // the pipe for timing info (the stream should be the same for any pipe splits)
924 if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
925 !resource_is_pipe_type(pipe, DPP_PIPE))
928 if (!found && pipe_mall_type == SUBVP_NONE) {
929 // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
934 if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
938 phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
939 main_timing = &subvp_pipe->stream->timing;
940 phantom_timing = &phantom_stream->timing;
941 vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
942 // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
943 // Also include the prefetch end to MALL start delay time
944 prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
945 (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
946 dc->caps.subvp_prefetch_end_to_mall_start_us;
947 // P-State allow width and FW delays are already included in phantom_timing->v_addressable
948 mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
949 (double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
950 vblank_frame_us = vblank_timing->v_total * vblank_timing->h_total /
951 (double)(vblank_timing->pix_clk_100hz * 100) * 1000000;
952 vblank_blank_us = (vblank_timing->v_total - vblank_timing->v_addressable) * vblank_timing->h_total /
953 (double)(vblank_timing->pix_clk_100hz * 100) * 1000000;
954 subvp_active_us = main_timing->v_addressable * main_timing->h_total /
955 (double)(main_timing->pix_clk_100hz * 100) * 1000000;
956 max_vblank_mallregion = vblank_blank_us > mall_region_us ? vblank_blank_us : mall_region_us;
958 // Schedulable if VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
959 // and the max of (VBLANK blanking time, MALL region)
960 // TODO: Possibly add some margin (i.e. the below conditions should be [...] > X instead of [...] > 0)
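/* Example with assumed values: a 60 Hz SubVP display with ~15.6 ms of VACTIVE,
 * ~1 ms of phantom prefetch and a 120 Hz VBLANK display (~8.3 ms frame, ~1 ms
 * max(blanking, MALL region)) leaves 15600 - 1000 - 8333 - 1000 ~= 5.3 ms of
 * slack, so the check below passes; with two 60 Hz displays the VBLANK frame
 * (~16.7 ms) alone already exceeds the SubVP VACTIVE and the check would fail.
 */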
961 if (subvp_active_us - prefetch_us - vblank_frame_us - max_vblank_mallregion > 0)
968 * subvp_subvp_admissable() - Determine if subvp + subvp config is admissible
970 * @dc: Current DC state
971 * @context: New DC state to be programmed
973 * SubVP + SubVP is admissible under the following conditions:
974 * - All SubVP pipes are < 120 Hz, OR
975 * - All SubVP pipes are >= 120 Hz
977 * Return: True if admissible, false otherwise
979 static bool subvp_subvp_admissable(struct dc *dc,
980 struct dc_state *context)
984 uint8_t subvp_count = 0;
985 uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0;
986 uint64_t refresh_rate = 0;
988 for (i = 0; i < dc->res_pool->pipe_count; i++) {
989 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
994 if (pipe->plane_state && !pipe->top_pipe &&
995 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
996 refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
997 pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
998 refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
999 refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
1001 if ((uint32_t)refresh_rate < min_refresh)
1002 min_refresh = (uint32_t)refresh_rate;
1003 if ((uint32_t)refresh_rate > max_refresh)
1004 max_refresh = (uint32_t)refresh_rate;
1009 if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) ||
1010 (min_refresh >= subvp_high_refresh_list.min_refresh &&
1011 max_refresh <= subvp_high_refresh_list.max_refresh)))
1018 * subvp_validate_static_schedulability - Check which SubVP case is calculated
1019 * and handle static analysis based on the case.
1020 * @dc: current dc state
1021 * @context: new dc state
1022 * @vlevel: Voltage level calculated by DML
1026 * 2. SubVP + VBLANK (DRR checked internally)
1027 * 3. SubVP + VACTIVE (currently unsupported)
1029 * Return: True if statically schedulable, false otherwise
1031 static bool subvp_validate_static_schedulability(struct dc *dc,
1032 struct dc_state *context,
1035 bool schedulable = false;
1036 struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
1037 uint32_t i, pipe_idx;
1038 uint8_t subvp_count = 0;
1039 uint8_t vactive_count = 0;
1040 uint8_t non_subvp_pipes = 0;
1042 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1043 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1044 enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
1049 if (pipe->plane_state && !pipe->top_pipe) {
1050 if (pipe_mall_type == SUBVP_MAIN)
1052 if (pipe_mall_type == SUBVP_NONE)
1056 // Count how many planes that aren't SubVP/phantom are capable of VACTIVE
1057 // switching (SubVP + VACTIVE unsupported). In situations where we force
1058 // SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
1059 if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
1060 pipe_mall_type == SUBVP_NONE) {
1066 if (subvp_count == 2) {
1067 // Static schedulability check for SubVP + SubVP case
1068 schedulable = subvp_subvp_admissable(dc, context) && subvp_subvp_schedulable(dc, context);
1069 } else if (subvp_count == 1 && non_subvp_pipes == 0) {
1070 // Single SubVP configs will be supported by default as long as they are supported by DML
1072 } else if (subvp_count == 1 && non_subvp_pipes == 1) {
1073 if (dcn32_subvp_drr_admissable(dc, context))
1074 schedulable = subvp_drr_schedulable(dc, context);
1075 else if (dcn32_subvp_vblank_admissable(dc, context, vlevel))
1076 schedulable = subvp_vblank_schedulable(dc, context);
1077 } else if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vactive_w_mall_sub_vp &&
1078 vactive_count > 0) {
1079 // For single display SubVP cases, DML will output dm_dram_clock_change_vactive_w_mall_sub_vp by default.
1080 // We tell the difference between SubVP vs. SubVP + VACTIVE by checking the vactive_count.
1081 // SubVP + VACTIVE currently unsupported
1082 schedulable = false;
1087 static void assign_subvp_index(struct dc *dc, struct dc_state *context)
1092 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1093 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1095 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
1096 dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
1097 pipe_ctx->subvp_index = index++;
1099 pipe_ctx->subvp_index = 0;
1104 struct pipe_slice_table {
1106 struct dc_stream_state *stream;
1108 } odm_combines[MAX_STREAMS];
1109 int odm_combine_count;
1112 struct pipe_ctx *pri_pipe;
1113 struct dc_plane_state *plane;
1115 } mpc_combines[MAX_SURFACES];
1116 int mpc_combine_count;
1120 static void update_slice_table_for_stream(struct pipe_slice_table *table,
1121 struct dc_stream_state *stream, int diff)
1125 for (i = 0; i < table->odm_combine_count; i++) {
1126 if (table->odm_combines[i].stream == stream) {
1127 table->odm_combines[i].slice_count += diff;
1132 if (i == table->odm_combine_count) {
1133 table->odm_combine_count++;
1134 table->odm_combines[i].stream = stream;
1135 table->odm_combines[i].slice_count = diff;
1139 static void update_slice_table_for_plane(struct pipe_slice_table *table,
1140 struct pipe_ctx *dpp_pipe, struct dc_plane_state *plane, int diff)
1143 struct pipe_ctx *pri_dpp_pipe = resource_get_primary_dpp_pipe(dpp_pipe);
1145 for (i = 0; i < table->mpc_combine_count; i++) {
1146 if (table->mpc_combines[i].plane == plane &&
1147 table->mpc_combines[i].pri_pipe == pri_dpp_pipe) {
1148 table->mpc_combines[i].slice_count += diff;
1153 if (i == table->mpc_combine_count) {
1154 table->mpc_combine_count++;
1155 table->mpc_combines[i].plane = plane;
1156 table->mpc_combines[i].pri_pipe = pri_dpp_pipe;
1157 table->mpc_combines[i].slice_count = diff;
1161 static void init_pipe_slice_table_from_context(
1162 struct pipe_slice_table *table,
1163 struct dc_state *context)
1166 struct pipe_ctx *otg_master;
1167 struct pipe_ctx *dpp_pipes[MAX_PIPES];
1168 struct dc_stream_state *stream;
1171 memset(table, 0, sizeof(*table));
1173 for (i = 0; i < context->stream_count; i++) {
1174 stream = context->streams[i];
1175 otg_master = resource_get_otg_master_for_stream(
1176 &context->res_ctx, stream);
1177 count = resource_get_odm_slice_count(otg_master);
1178 update_slice_table_for_stream(table, stream, count);
1180 count = resource_get_dpp_pipes_for_opp_head(otg_master,
1181 &context->res_ctx, dpp_pipes);
1182 for (j = 0; j < count; j++)
1183 if (dpp_pipes[j]->plane_state)
1184 update_slice_table_for_plane(table, dpp_pipes[j],
1185 dpp_pipes[j]->plane_state, 1);
1189 static bool update_pipe_slice_table_with_split_flags(
1190 struct pipe_slice_table *table,
1192 struct dc_state *context,
1193 struct vba_vars_st *vba,
1194 int split[MAX_PIPES],
1195 bool merge[MAX_PIPES])
1197 /* NOTE: we are deprecating the support for the concept of pipe splitting
1198 * or pipe merging. Instead we append slices to the end and remove
1199 * slices from the end. The following code converts a pipe split or
1200 * merge to an append or remove operation.
1203 * When split flags describe the following pipe connection transition
1206 * pipe 0 (split=2) -> pipe 1 (split=2)
1207 * to: (old behavior)
1208 * pipe 0 -> pipe 2 -> pipe 1 -> pipe 3
1210 * the code below actually does:
1211 * pipe 0 -> pipe 1 -> pipe 2 -> pipe 3
1213 * This is the new intended behavior and for future DCNs we will retire
1214 * the old concept completely.
1216 struct pipe_ctx *pipe;
1218 int dc_pipe_idx, dml_pipe_idx = 0;
1219 bool updated = false;
1221 for (dc_pipe_idx = 0;
1222 dc_pipe_idx < dc->res_pool->pipe_count; dc_pipe_idx++) {
1223 pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
1224 if (resource_is_pipe_type(pipe, FREE_PIPE))
1227 if (merge[dc_pipe_idx]) {
1228 if (resource_is_pipe_type(pipe, OPP_HEAD))
1229 /* merging OPP head means reducing ODM slice count by 1 */
1232 update_slice_table_for_stream(table, pipe->stream, -1);
1233 else if (resource_is_pipe_type(pipe, DPP_PIPE) &&
1234 resource_get_odm_slice_index(resource_get_opp_head(pipe)) == 0)
1235 /* merging DPP pipe of the first ODM slice means
1236 * reducing MPC slice count by 1
1238 update_slice_table_for_plane(table, pipe, pipe->plane_state, -1);
1242 if (split[dc_pipe_idx]) {
1243 odm = vba->ODMCombineEnabled[vba->pipe_plane[dml_pipe_idx]] !=
1244 dm_odm_combine_mode_disabled;
1245 if (odm && resource_is_pipe_type(pipe, OPP_HEAD))
1246 update_slice_table_for_stream(
1247 table, pipe->stream, split[dc_pipe_idx] - 1);
1248 else if (!odm && resource_is_pipe_type(pipe, DPP_PIPE))
1249 update_slice_table_for_plane(table, pipe,
1250 pipe->plane_state, split[dc_pipe_idx] - 1);
1258 static void update_pipes_with_slice_table(struct dc *dc, struct dc_state *context,
1259 struct pipe_slice_table *table)
1263 for (i = 0; i < table->odm_combine_count; i++)
1264 resource_update_pipes_for_stream_with_slice_count(context,
1265 dc->current_state, dc->res_pool,
1266 table->odm_combines[i].stream,
1267 table->odm_combines[i].slice_count);
1269 for (i = 0; i < table->mpc_combine_count; i++)
1270 resource_update_pipes_for_plane_with_slice_count(context,
1271 dc->current_state, dc->res_pool,
1272 table->mpc_combines[i].plane,
1273 table->mpc_combines[i].slice_count);
1276 static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *context,
1277 struct vba_vars_st *vba, int split[MAX_PIPES],
1278 bool merge[MAX_PIPES])
1280 struct pipe_slice_table slice_table;
1283 init_pipe_slice_table_from_context(&slice_table, context);
1284 updated = update_pipe_slice_table_with_split_flags(
1285 &slice_table, dc, context, vba,
1287 update_pipes_with_slice_table(dc, context, &slice_table);
1291 static bool should_allow_odm_power_optimization(struct dc *dc,
1292 struct dc_state *context, struct vba_vars_st *v, int *split,
1295 struct dc_stream_state *stream = context->streams[0];
1296 struct pipe_slice_table slice_table;
1300 * This debug flag allows us to disable the ODM power optimization feature
1301 * unconditionally. We force the feature off if this is set to false.
1303 if (!dc->debug.enable_single_display_2to1_odm_policy)
1306 /* The current design and test coverage is limited to allowing ODM power
1307 * optimization for a single stream. Supporting it for multiple streams
1308 * use case would require additional algorithm to decide how to
1309 * optimize power consumption when there are not enough free pipes to
1310 * allocate for all the streams. This level of optimization would
1311 * require multiple attempts of revalidation to make an optimized
1312 * decision. Unfortunately, we do not support the revalidation flow in
1313 * current version of DML.
1315 if (context->stream_count != 1)
1319 * Our hardware doesn't support ODM for HDMI TMDS
1321 if (dc_is_hdmi_signal(stream->signal))
1325 * ODM Combine 2:1 requires horizontal timing divisible by 2 so each
1326 * ODM segment has the same size.
1328 if (!is_h_timing_divisible_by_2(stream))
1332 * No power benefits if the timing's pixel clock is not high enough to
1333 * raise display clock from minimum power state.
1335 if (stream->timing.pix_clk_100hz * 100 <= DCN3_2_VMIN_DISPCLK_HZ)
1338 if (dc->config.enable_windowed_mpo_odm) {
1340 * ODM power optimization should only be allowed if the feature
1341 * can be seamlessly toggled off within an update. This would
1342 * require that the feature is applied on top of a minimal
1343 * state. A minimal state is defined as a state validated
1344 * without the need for pipe splitting. Therefore, when transitioning to
1345 * toggle the feature off, the same stream and plane
1346 * configuration can be supported by the pipe resource in the
1347 * first ODM slice alone without the need to acquire extra pipes.
1350 init_pipe_slice_table_from_context(&slice_table, context);
1351 update_pipe_slice_table_with_split_flags(
1352 &slice_table, dc, context, v,
1354 for (i = 0; i < slice_table.mpc_combine_count; i++)
1355 if (slice_table.mpc_combines[i].slice_count > 1)
1358 for (i = 0; i < slice_table.odm_combine_count; i++)
1359 if (slice_table.odm_combines[i].slice_count > 1)
1363 * the new ODM power optimization feature reduces software
1364 * design limitations and allows ODM power optimization to be
1365 * supported even with presence of overlay planes. The new
1366 * feature is enabled based on enable_windowed_mpo_odm flag. If
1367 * the flag is not set, we limit our feature scope due to
1368 * previous software design limitations.
1370 if (context->stream_status[0].plane_count != 1)
1373 if (memcmp(&context->stream_status[0].plane_states[0]->clip_rect,
1374 &stream->src, sizeof(struct rect)) != 0)
1377 if (stream->src.width >= 5120 &&
1378 stream->src.width > stream->dst.width)
1384 static void try_odm_power_optimization_and_revalidate(
1386 struct dc_state *context,
1387 display_e2e_pipe_params_st *pipes,
1390 unsigned int *vlevel,
1394 unsigned int new_vlevel;
1396 for (i = 0; i < pipe_cnt; i++)
1397 pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
1399 new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
1401 if (new_vlevel < context->bw_ctx.dml.soc.num_states) {
1402 memset(split, 0, MAX_PIPES * sizeof(int));
1403 memset(merge, 0, MAX_PIPES * sizeof(bool));
1404 *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge);
1405 context->bw_ctx.dml.vba.VoltageLevel = *vlevel;
1409 static bool is_test_pattern_enabled(
1410 struct dc_state *context)
1414 for (i = 0; i < context->stream_count; i++) {
1415 if (context->streams[i]->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
1422 static void dcn32_full_validate_bw_helper(struct dc *dc,
1423 struct dc_state *context,
1424 display_e2e_pipe_params_st *pipes,
1430 struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
1431 unsigned int dc_pipe_idx = 0;
1433 bool found_supported_config = false;
1434 int vlevel_temp = 0;
1436 dc_assert_fp_enabled();
1439 * DML favors voltage over p-state, but we're more interested in
1440 * supporting p-state over voltage. We can't support p-state in
1441 * prefetch mode > 0 so try capping the prefetch mode to start.
1442 * Override present for testing.
1444 if (dc->debug.dml_disallow_alternate_prefetch_modes)
1445 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
1446 dm_prefetch_support_uclk_fclk_and_stutter;
1448 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
1449 dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
1451 *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1452 /* This may adjust vlevel and maxMpcComb */
1453 if (*vlevel < context->bw_ctx.dml.soc.num_states) {
1454 *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
1455 vba->VoltageLevel = *vlevel;
1458 /* Conditions for setting up phantom pipes for SubVP:
1459 * 1. Not force disable SubVP
1460 * 2. Full update (i.e. !fast_validate)
1461 * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
1462 * 4. Display configuration passes validation
1463 * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
1465 if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
1466 !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && !is_test_pattern_enabled(context) &&
1467 (*vlevel == context->bw_ctx.dml.soc.num_states || (vba->DRAMSpeedPerState[*vlevel] != vba->DRAMSpeedPerState[0] &&
1468 vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) ||
1469 vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
1470 dc->debug.force_subvp_mclk_switch)) {
1472 dcn32_merge_pipes_for_subvp(dc, context);
1473 memset(merge, 0, MAX_PIPES * sizeof(bool));
1475 vlevel_temp = *vlevel;
1476 /* to re-initialize viewport after the pipe merge */
1477 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1478 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1480 if (!pipe_ctx->plane_state || !pipe_ctx->stream)
1483 resource_build_scaling_params(pipe_ctx);
1486 while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
1487 dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
1488 /* For the case where *vlevel = num_states, bandwidth validation has failed for this config.
1489 * Adding phantom pipes won't change the validation result, so change the DML input param
1490 * for P-State support before adding phantom pipes and recalculating the DML result.
1491 * However, this case is only applicable for SubVP + DRR cases because the prefetch mode
1492 * will not allow for switch in VBLANK. The DRR display must have its VBLANK stretched
1493 * enough to support MCLK switching.
1495 if (*vlevel == context->bw_ctx.dml.soc.num_states &&
1496 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
1497 dm_prefetch_support_uclk_fclk_and_stutter) {
1498 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
1499 dm_prefetch_support_fclk_and_stutter;
1500 /* There are params (such as FabricClock) that need to be recalculated
1501 * after validation fails (otherwise it will be 0). Calculation for
1502 * phantom vactive requires call into DML, so we must ensure all the
1503 * vba params are valid otherwise we'll get incorrect phantom vactive.
1505 *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1508 dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx);
1510 *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
1511 // Populate dppclk to trigger a recalculate in dml_get_voltage_level
1512 // so the phantom pipe DLG params can be assigned correctly.
1513 pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
1514 *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1516 /* Check that vlevel requested supports pstate or not
1517 * if not, select the lowest vlevel that supports it
1519 for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
1520 if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) {
1526 if (*vlevel < context->bw_ctx.dml.soc.num_states
1527 && subvp_validate_static_schedulability(dc, context, *vlevel))
1528 found_supported_config = true;
1529 if (found_supported_config) {
1530 // For SubVP + DRR cases, we can force the lowest vlevel that supports the mode
1531 if (dcn32_subvp_drr_admissable(dc, context) && subvp_drr_schedulable(dc, context)) {
1532 /* find lowest vlevel that supports the config */
1533 for (i = *vlevel; i >= 0; i--) {
1534 if (vba->ModeSupport[i][vba->maxMpcComb]) {
1544 if (vba->DRAMSpeedPerState[*vlevel] >= vba->DRAMSpeedPerState[vlevel_temp])
1545 found_supported_config = false;
1547 // If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
1548 // remove phantom pipes and repopulate dml pipes
1549 if (!found_supported_config) {
1550 dc_state_remove_phantom_streams_and_planes(dc, context);
1551 dc_state_release_phantom_streams_and_planes(dc, context);
1552 vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
1553 *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
1555 *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1556 /* This may adjust vlevel and maxMpcComb */
1557 if (*vlevel < context->bw_ctx.dml.soc.num_states) {
1558 *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
1559 vba->VoltageLevel = *vlevel;
1562	// Must populate phantom DLG params before programming hardware / timing for phantom pipe
1563 dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);
1565 /* Call validate_apply_pipe_split flags after calling DML getters for
1566 * phantom dlg params, or some of the VBA params indicating pipe split
1567 * can be overwritten by the getters.
1569 * When setting up SubVP config, all pipes are merged before attempting to
1570 * add phantom pipes. If pipe split (ODM / MPC) is required, both the main
1571 * and phantom pipes will be split in the regular pipe splitting sequence.
1573 memset(split, 0, MAX_PIPES * sizeof(int));
1574 memset(merge, 0, MAX_PIPES * sizeof(bool));
1575 *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
1576 vba->VoltageLevel = *vlevel;
1577 // Note: We can't apply the phantom pipes to hardware at this time. We have to wait
1578 // until driver has acquired the DMCUB lock to do it safely.
1579 assign_subvp_index(dc, context);
1583 if (should_allow_odm_power_optimization(dc, context, vba, split, merge))
1584 try_odm_power_optimization_and_revalidate(
1585 dc, context, pipes, split, merge, vlevel, *pipe_cnt);
1589 static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
1593 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1594 if (!context->res_ctx.pipe_ctx[i].stream)
1596 if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
1602 static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
1604 struct dc_crtc_timing patched_crtc_timing;
1605 uint32_t asic_blank_end = 0;
1606 uint32_t asic_blank_start = 0;
1607 uint32_t newVstartup = 0;
1609 patched_crtc_timing = *dc_crtc_timing;
1611 if (patched_crtc_timing.flags.INTERLACE == 1) {
1612 if (patched_crtc_timing.v_front_porch < 2)
1613 patched_crtc_timing.v_front_porch = 2;
1615 if (patched_crtc_timing.v_front_porch < 1)
1616 patched_crtc_timing.v_front_porch = 1;
1619 /* blank_start = frame end - front porch */
1620 asic_blank_start = patched_crtc_timing.v_total -
1621 patched_crtc_timing.v_front_porch;
1623 /* blank_end = blank_start - active */
1624 asic_blank_end = asic_blank_start -
1625 patched_crtc_timing.v_border_bottom -
1626 patched_crtc_timing.v_addressable -
1627 patched_crtc_timing.v_border_top;
1629 newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
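/* Illustrative example with hypothetical 1080p-like numbers: v_total = 1125,
 * v_front_porch = 4, v_addressable = 1080, no borders:
 * asic_blank_start = 1125 - 4 = 1121, asic_blank_end = 1121 - 1080 = 41,
 * newVstartup = 41 + (1125 - 1121) = 45, so vstartup_start is raised to at least 45.
 */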
1631 *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
1634 static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
1635 display_e2e_pipe_params_st *pipes,
1636 int pipe_cnt, int vlevel)
1638 int i, pipe_idx, active_hubp_count = 0;
1639 bool usr_retraining_support = false;
1640 bool unbounded_req_enabled = false;
1641 struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
1643 dc_assert_fp_enabled();
1645 /* Writeback MCIF_WB arbitration parameters */
1646 dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
1648 context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
1649 context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
1650 context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
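/* DRAMSpeed from DML is in MT/s; dividing by 16 recovers the memclk value
 * (the inverse of the memclk_mhz * 16 conversion used elsewhere in this file)
 * before scaling to kHz.
 */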
1651 context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
1652 context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
1653 context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
1654 context->bw_ctx.bw.dcn.clk.p_state_change_support =
1655 context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
1656 != dm_dram_clock_change_unsupported;
1658 /* Pstate change might not be supported by hardware, but it might be
1659 * possible with firmware driven vertical blank stretching.
1661 context->bw_ctx.bw.dcn.clk.p_state_change_support |= context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
1663 context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
1664 context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
1665 context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = context->bw_ctx.dml.vba.DTBCLKPerState[vlevel] * 1000;
1666 if (context->bw_ctx.dml.vba.FCLKChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_fclock_change_unsupported)
1667 context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false;
1669 context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
1671 usr_retraining_support = context->bw_ctx.dml.vba.USRRetrainingSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
1672 ASSERT(usr_retraining_support);
1674 if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
1675 context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
1677 unbounded_req_enabled = get_unbounded_request_enabled(&context->bw_ctx.dml, pipes, pipe_cnt);
1679 if (unbounded_req_enabled && pipe_cnt > 1) {
1680 // Unbounded requesting should not ever be used when more than 1 pipe is enabled.
1682 unbounded_req_enabled = false;
1685 context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0;
1686 context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
1687 context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
1689 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1690 if (!context->res_ctx.pipe_ctx[i].stream)
1692 if (context->res_ctx.pipe_ctx[i].plane_state)
1693 active_hubp_count++;
1694 pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,
1696 pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
1698 pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt,
1700 pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
1703 if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
1704 // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
1705 context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
1706 context->res_ctx.pipe_ctx[i].unbounded_req = false;
1708 context->res_ctx.pipe_ctx[i].det_buffer_size_kb = get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt,
1710 context->res_ctx.pipe_ctx[i].unbounded_req = unbounded_req_enabled;
1713 if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
1714 context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
1715 if (context->res_ctx.pipe_ctx[i].plane_state)
1716 context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
1718 context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
1719 context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
1721 context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1723 if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0)
1724 context->res_ctx.pipe_ctx[i].has_vactive_margin = true;
1726 context->res_ctx.pipe_ctx[i].has_vactive_margin = false;
1728 /* MALL Allocation Sizes */
1729 /* count from active, top pipes per plane only */
1730 if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state &&
1731 (context->res_ctx.pipe_ctx[i].top_pipe == NULL ||
1732 context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
1733 context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
1734 /* SS: all active surfaces stored in MALL */
1735 if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) {
1736 context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
1738 if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
1739 /* SS PSR On: all active surfaces part of streams not supporting PSR stored in MALL */
1740 context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
1743 /* SUBVP: phantom surfaces only stored in MALL */
1744 context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
1748 if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
1749 dcn20_adjust_freesync_v_startup(
1750 &context->res_ctx.pipe_ctx[i].stream->timing,
1751 &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
1755 /* If DCN isn't making memory requests we can allow pstate change and lower clocks */
1756 if (!active_hubp_count) {
1757 context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
1758 context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
1759 context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
1760 context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
1761 context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
1762 context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
1763 context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
1764 context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
1766	/* save original copies of dppclk and dispclk */
1767 context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
1768 context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
1769 context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz
1771 context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz
1774 context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context);
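/* The compressed buffer gets whatever remains of the return buffer after the
 * per-pipe DET allocations below are subtracted.
 */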
1776 context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes;
1778 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1779 if (context->res_ctx.pipe_ctx[i].stream)
1780 context->bw_ctx.bw.dcn.compbuf_size_kb -= context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
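/* With the final DLG params computed, fill each active pipe's RQ/DLG/TTU
 * register sets from DML.
 */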
1783 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1785 if (!context->res_ctx.pipe_ctx[i].stream)
1788 context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg_v2(&context->bw_ctx.dml,
1789 &context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs, pipes,
1790 pipe_cnt, pipe_idx);
1792 context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg_v2(&context->res_ctx.pipe_ctx[i].rq_regs,
1793 &context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1798 static struct pipe_ctx *dcn32_find_split_pipe(
1800 struct dc_state *context,
1803 struct pipe_ctx *pipe = NULL;
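/* Search order: reuse old_index if that pipe is free in the new context, then
 * prefer a free pipe that was a head pipe (no top_pipe / prev_odm_pipe) in the
 * current state, and finally fall back to any free pipe.
 */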
1806 if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
1807 pipe = &context->res_ctx.pipe_ctx[old_index];
1808 pipe->pipe_idx = old_index;
1812 for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
1813 if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
1814 && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
1815 if (context->res_ctx.pipe_ctx[i].stream == NULL) {
1816 pipe = &context->res_ctx.pipe_ctx[i];
1824 * May need to fix pipes getting tossed from 1 opp to another on flip
1825 * Add for debugging transient underflow during topology updates:
1829 for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
1830 if (context->res_ctx.pipe_ctx[i].stream == NULL) {
1831 pipe = &context->res_ctx.pipe_ctx[i];
1840 static bool dcn32_split_stream_for_mpc_or_odm(
1841 const struct dc *dc,
1842 struct resource_context *res_ctx,
1843 struct pipe_ctx *pri_pipe,
1844 struct pipe_ctx *sec_pipe,
1847 int pipe_idx = sec_pipe->pipe_idx;
1848 const struct resource_pool *pool = dc->res_pool;
1850 DC_LOGGER_INIT(dc->ctx->logger);
1852 if (odm && pri_pipe->plane_state) {
1853 /* ODM + window MPO, where MPO window is on left half only */
1854 if (pri_pipe->plane_state->clip_rect.x + pri_pipe->plane_state->clip_rect.width <=
1855 pri_pipe->stream->src.x + pri_pipe->stream->src.width/2) {
1857 DC_LOG_SCALER("%s - ODM + window MPO(left). pri_pipe:%d\n",
1859 pri_pipe->pipe_idx);
1863 /* ODM + window MPO, where MPO window is on right half only */
1864 if (pri_pipe->plane_state->clip_rect.x >= pri_pipe->stream->src.x + pri_pipe->stream->src.width/2) {
1866 DC_LOG_SCALER("%s - ODM + window MPO(right). pri_pipe:%d\n",
1868 pri_pipe->pipe_idx);
1873 *sec_pipe = *pri_pipe;
1875 sec_pipe->pipe_idx = pipe_idx;
1876 sec_pipe->plane_res.mi = pool->mis[pipe_idx];
1877 sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
1878 sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
1879 sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
1880 sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
1881 sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
1882 sec_pipe->stream_res.dsc = NULL;
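/* For an ODM split, link sec_pipe into the ODM chain, give it its own OPP and
 * acquire a DSC if the stream needs one; for an MPC split, attach it below
 * pri_pipe in the MPC tree instead.
 */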
1884 if (pri_pipe->next_odm_pipe) {
1885 ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
1886 sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
1887 sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
1889 if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
1890 pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
1891 sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
1893 if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
1894 pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
1895 sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
1897 pri_pipe->next_odm_pipe = sec_pipe;
1898 sec_pipe->prev_odm_pipe = pri_pipe;
1899 ASSERT(sec_pipe->top_pipe == NULL);
1901 if (!sec_pipe->top_pipe)
1902 sec_pipe->stream_res.opp = pool->opps[pipe_idx];
1904 sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
1905 if (sec_pipe->stream->timing.flags.DSC == 1) {
1906 dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
1907 ASSERT(sec_pipe->stream_res.dsc);
1908 if (sec_pipe->stream_res.dsc == NULL)
1912 if (pri_pipe->bottom_pipe) {
1913 ASSERT(pri_pipe->bottom_pipe != sec_pipe);
1914 sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
1915 sec_pipe->bottom_pipe->top_pipe = sec_pipe;
1917 pri_pipe->bottom_pipe = sec_pipe;
1918 sec_pipe->top_pipe = pri_pipe;
1920 ASSERT(pri_pipe->plane_state);
1926 bool dcn32_internal_validate_bw(struct dc *dc,
1927 struct dc_state *context,
1928 display_e2e_pipe_params_st *pipes,
1934 bool repopulate_pipes = false;
1935 int split[MAX_PIPES] = { 0 };
1936 bool merge[MAX_PIPES] = { false };
1937 bool newly_split[MAX_PIPES] = { false };
1938 int pipe_cnt, i, pipe_idx;
1939 int vlevel = context->bw_ctx.dml.soc.num_states;
1940 struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
1942 dc_assert_fp_enabled();
1948 // For each full update, remove all existing phantom pipes first
1949 dc_state_remove_phantom_streams_and_planes(dc, context);
1950 dc_state_release_phantom_streams_and_planes(dc, context);
1952 dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
1954 pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
1961 dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
1962 context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
1965 dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
1967 if (fast_validate ||
1968 (dc->debug.dml_disallow_alternate_prefetch_modes &&
1969 (vlevel == context->bw_ctx.dml.soc.num_states ||
1970 vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
1972 * If dml_disallow_alternate_prefetch_modes is false, then we have already
1973 * tried alternate prefetch modes during full validation.
1975 * If mode is unsupported or there is no p-state support, then
1976 * fall back to favouring voltage.
1978 * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
1979 * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
1981 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
1982 dm_prefetch_support_none;
1984 context->bw_ctx.dml.validate_max_state = fast_validate;
1985 vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
1987 context->bw_ctx.dml.validate_max_state = false;
1989 if (vlevel < context->bw_ctx.dml.soc.num_states) {
1990 memset(split, 0, sizeof(split));
1991 memset(merge, 0, sizeof(merge));
1992 vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
1993 // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML
1994 vba->VoltageLevel = vlevel;
1998 dml_log_mode_support_params(&context->bw_ctx.dml);
2000 if (vlevel == context->bw_ctx.dml.soc.num_states)
2003 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
2004 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2005 struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
2010 if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
2011 && !dc->config.enable_windowed_mpo_odm
2012 && pipe->plane_state && mpo_pipe
2013 && memcmp(&mpo_pipe->plane_state->clip_rect,
2015 sizeof(struct rect)) != 0) {
2016 ASSERT(mpo_pipe->plane_state != pipe->plane_state);
2022 if (dc->config.enable_windowed_mpo_odm) {
2023 repopulate_pipes = update_pipes_with_split_flags(
2024 dc, context, vba, split, merge);
2026 /* the code below will be removed once windowed mpo odm is fully
2029 /* merge pipes if necessary */
2030 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2031 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2033 /*skip pipes that don't need merging*/
2037 /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
2038 if (pipe->prev_odm_pipe) {
2039 /*split off odm pipe*/
2040 pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
2041 if (pipe->next_odm_pipe)
2042 pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
2044	/* 2:1 ODM + MPC Split MPO to Single Pipe + MPC Split MPO */
2045 if (pipe->bottom_pipe) {
2046 if (pipe->bottom_pipe->prev_odm_pipe || pipe->bottom_pipe->next_odm_pipe) {
2047 /*MPC split rules will handle this case*/
2048 pipe->bottom_pipe->top_pipe = NULL;
2050	/* when merging ODM pipes, the bottom MPC pipe must now point to
2051 * the previous ODM pipe and its associated stream assets
2053 if (pipe->prev_odm_pipe->bottom_pipe) {
2055 pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
2056 pipe->prev_odm_pipe->bottom_pipe->bottom_pipe = pipe->bottom_pipe;
2059 pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
2060 pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
2063 memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource));
2067 if (pipe->top_pipe) {
2068 pipe->top_pipe->bottom_pipe = NULL;
2071 pipe->bottom_pipe = NULL;
2072 pipe->next_odm_pipe = NULL;
2073 pipe->plane_state = NULL;
2074 pipe->stream = NULL;
2075 pipe->top_pipe = NULL;
2076 pipe->prev_odm_pipe = NULL;
2077 if (pipe->stream_res.dsc)
2078 dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
2079 memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
2080 memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
2081 memset(&pipe->link_res, 0, sizeof(pipe->link_res));
2082 repopulate_pipes = true;
2083 } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
2084 struct pipe_ctx *top_pipe = pipe->top_pipe;
2085 struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
2087 top_pipe->bottom_pipe = bottom_pipe;
2089 bottom_pipe->top_pipe = top_pipe;
2091 pipe->top_pipe = NULL;
2092 pipe->bottom_pipe = NULL;
2093 pipe->plane_state = NULL;
2094 pipe->stream = NULL;
2095 memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
2096 memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
2097 memset(&pipe->link_res, 0, sizeof(pipe->link_res));
2098 repopulate_pipes = true;
2100 ASSERT(0); /* Should never try to merge master pipe */
2104 for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
2105 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2106 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2107 struct pipe_ctx *hsplit_pipe = NULL;
2111 if (!pipe->stream || newly_split[i])
2115 odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
2117 if (!pipe->plane_state && !odm)
2122 if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
2123 old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
2124 else if (old_pipe->next_odm_pipe)
2125 old_index = old_pipe->next_odm_pipe->pipe_idx;
2127 if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
2128 old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
2129 old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
2130 else if (old_pipe->bottom_pipe &&
2131 old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
2132 old_index = old_pipe->bottom_pipe->pipe_idx;
2134 hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index);
2135 ASSERT(hsplit_pipe);
2139 if (!dcn32_split_stream_for_mpc_or_odm(
2140 dc, &context->res_ctx,
2141 pipe, hsplit_pipe, odm))
2144 newly_split[hsplit_pipe->pipe_idx] = true;
2145 repopulate_pipes = true;
2147 if (split[i] == 4) {
2148 struct pipe_ctx *pipe_4to1;
2150 if (odm && old_pipe->next_odm_pipe)
2151 old_index = old_pipe->next_odm_pipe->pipe_idx;
2152 else if (!odm && old_pipe->bottom_pipe &&
2153 old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
2154 old_index = old_pipe->bottom_pipe->pipe_idx;
2157 pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
2161 if (!dcn32_split_stream_for_mpc_or_odm(
2162 dc, &context->res_ctx,
2163 pipe, pipe_4to1, odm))
2165 newly_split[pipe_4to1->pipe_idx] = true;
2167 if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
2168 && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
2169 old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
2170 else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
2171 old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
2172 old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
2173 old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
2176 pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
2180 if (!dcn32_split_stream_for_mpc_or_odm(
2181 dc, &context->res_ctx,
2182 hsplit_pipe, pipe_4to1, odm))
2184 newly_split[pipe_4to1->pipe_idx] = true;
2187 dcn20_build_mapped_resource(dc, context, pipe->stream);
2190 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2191 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2193 if (pipe->plane_state) {
2194 if (!resource_build_scaling_params(pipe))
2200 /* Actual dsc count per stream dsc validation*/
2201 if (!dcn20_validate_dsc(dc, context)) {
2202 vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
2206 if (repopulate_pipes) {
2207 int flag_max_mpc_comb = vba->maxMpcComb;
2208 int flag_vlevel = vlevel;
2211 pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
2212 dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
2214 /* repopulate_pipes = 1 means the pipes were either split or merged. In this case
2215 * we have to re-calculate the DET allocation and run through DML once more to
2216 * ensure all the params are calculated correctly. We do not need to run the
2217 * pipe split check again after this call (pipes are already split / merged).
2219 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
2220 dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
2222 vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
2224 if (vlevel == context->bw_ctx.dml.soc.num_states) {
2225 /* failed after DET size changes */
2227 } else if (flag_max_mpc_comb == 0 &&
2228 flag_max_mpc_comb != context->bw_ctx.dml.vba.maxMpcComb) {
2229 /* check the context constructed with pipe split flags is still valid*/
2230 bool flags_valid = false;
2231 for (i = flag_vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
2232 if (vba->ModeSupport[i][flag_max_mpc_comb]) {
2233 vba->maxMpcComb = flag_max_mpc_comb;
2234 vba->VoltageLevel = i;
2241 /* this should never happen */
2246 *vlevel_out = vlevel;
2247 *pipe_cnt_out = pipe_cnt;
2260 void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
2261 display_e2e_pipe_params_st *pipes,
2265 int i, pipe_idx, vlevel_temp = 0;
2266 double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
2267 double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
2268 double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
2269 double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
2270 bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
2271 dm_dram_clock_change_unsupported;
2272 unsigned int dummy_latency_index = 0;
2273 int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
2274 unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
2275 bool subvp_in_use = dcn32_subvp_in_use(dc, context);
2276 unsigned int min_dram_speed_mts_margin;
2277 bool need_fclk_lat_as_dummy = false;
2278 bool is_subvp_p_drr = false;
2279 struct dc_stream_state *fpo_candidate_stream = NULL;
2281 dc_assert_fp_enabled();
2283 /* need to find dummy latency index for subvp */
2285	/* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching its VBLANK */
2287 context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
2288 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter;
2290 is_subvp_p_drr = true;
2292 dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
2293 context, pipes, pipe_cnt, vlevel);
2295 /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so prefetch is
2296 * scheduled correctly to account for dummy pstate.
2298 if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) {
2299 need_fclk_lat_as_dummy = true;
2300 context->bw_ctx.dml.soc.fclk_change_latency_us =
2301 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
2303 context->bw_ctx.dml.soc.dram_clock_change_latency_us =
2304 dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
2305 dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
2306 maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
2307 if (is_subvp_p_drr) {
2308 context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
2312 context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
2313 for (i = 0; i < context->stream_count; i++) {
2314 if (context->streams[i])
2315 context->streams[i]->fpo_in_use = false;
2318 if (!pstate_en || (!dc->debug.disable_fpo_optimizations &&
2319 pstate_en && vlevel != 0)) {
2320	/* only when the mclk switch cannot be natural is the fw based vblank stretch attempted */
2321 fpo_candidate_stream = dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
2322 if (fpo_candidate_stream) {
2323 fpo_candidate_stream->fpo_in_use = true;
2324 context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
2327 if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
2328 dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
2329 context, pipes, pipe_cnt, vlevel);
2331	/* After calling dcn32_find_dummy_latency_index_for_fw_based_mclk_switch
2332 * we reinstate the original dram_clock_change_latency_us on the context
2333 * and all variables that may have changed up to this point, except the
2334 * newly found dummy_latency_index
2336 context->bw_ctx.dml.soc.dram_clock_change_latency_us =
2337 dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
2338 /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so
2339 * prefetch is scheduled correctly to account for dummy pstate.
2341 if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) {
2342 need_fclk_lat_as_dummy = true;
2343 context->bw_ctx.dml.soc.fclk_change_latency_us =
2344 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
2346 dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false);
2347 if (vlevel_temp < vlevel) {
2348 vlevel = vlevel_temp;
2349 maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
2350 dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
2352 context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank;
2354 /* Restore FCLK latency and re-run validation to go back to original validation
2355 * output if we find that enabling FPO does not give us any benefit (i.e. lower
2358 context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
2359 for (i = 0; i < context->stream_count; i++) {
2360 if (context->streams[i])
2361 context->streams[i]->fpo_in_use = false;
2363 context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
2364 dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
2370 * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present,
2371	 * otherwise use an arbitrary low value from the spreadsheet for DCFCLK, as lower is safer for watermark
2372 * calculations to cover bootup clocks.
2373 * DCFCLK: soc.clock_limits[2] when available
2374 * UCLK: soc.clock_limits[2] when available
2376 if (dcn3_2_soc.num_states > 2) {
2378 dcfclk = dcn3_2_soc.clock_limits[2].dcfclk_mhz;
2380 dcfclk = 615; //DCFCLK Vmin_lv
2382 pipes[0].clks_cfg.voltage = vlevel_temp;
2383 pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
2384 pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
2386 if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
2387 context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
2388 context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us;
2389 context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
2390 context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
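/* The DML watermark getters return microseconds; the * 1000 conversions below
 * store nanosecond values in the _ns watermark fields.
 */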
2392 context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2393 context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2394 context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2395 context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2396 context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2397 context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2398 context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2399 context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2400 context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2401 context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2405 * DCFCLK: Min, as reported by PM FW when available
2406 * UCLK : Min, as reported by PM FW when available
2407 * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr)
2411 if (dcn3_2_soc.num_states > 2) {
2413 dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
2415 dcfclk = 615; //DCFCLK Vmin_lv
2417 pipes[0].clks_cfg.voltage = vlevel_temp;
2418 pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
2419 pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
2421 if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
2422 context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
2423 context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us;
2424 context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
2425 context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
2427 context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2428 context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2429 context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2430 context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2431 context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2432 context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2433 context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2434 context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2435 context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2436 context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2439 /* Set C, for Dummy P-State:
2441 * DCFCLK: Min, as reported by PM FW, when available
2442 * UCLK : Min, as reported by PM FW, when available
2443 * pstate latency as per UCLK state dummy pstate latency
2446 // For Set A and Set C use values from validation
2447 pipes[0].clks_cfg.voltage = vlevel;
2448 pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
2449 pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
2451 if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
2452 pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching;
2455 if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
2456 min_dram_speed_mts = dram_speed_from_validation;
2457 min_dram_speed_mts_margin = 160;
2459 context->bw_ctx.dml.soc.dram_clock_change_latency_us =
2460 dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
2462 if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] ==
2463 dm_dram_clock_change_unsupported) {
2464 int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1;
2466 min_dram_speed_mts =
2467 dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
2470 if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) {
2471	/* find the largest table entry that is lower than the dram speed;
2472	 * anything lower than DPM0 still uses DPM0
2474 for (dummy_latency_index = 3; dummy_latency_index > 0; dummy_latency_index--)
2475 if (min_dram_speed_mts + min_dram_speed_mts_margin >
2476 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dram_speed_mts)
2480 context->bw_ctx.dml.soc.dram_clock_change_latency_us =
2481 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
2483 context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us;
2484 context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
2485 context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
2488 context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2489 context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2490 context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2491 context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2492 context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2493 context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2494 context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2495 context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2496 /* On DCN32/321, PMFW will set PSTATE_CHANGE_TYPE = 1 (FCLK) for UCLK dummy p-state.
2497 * In this case we must program FCLK WM Set C to use the UCLK dummy p-state WM
2500 context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2501 context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2503 if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
2504	/* The only difference between A and C is p-state latency; if p-state is not supported
2505	 * with full p-state latency, we want to calculate DLG based on the dummy p-state latency.
2506	 * Set A p-state watermark was set to 0 on DCN30 when p-state is unsupported; for now keep the same behavior as DCN30.
2508 context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
2509 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
2510 /* Calculate FCLK p-state change watermark based on FCLK pstate change latency in case
2511 * UCLK p-state is not supported, to avoid underflow in case FCLK pstate is supported
2513 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2517 * DCFCLK: Min, as reported by PM FW, when available
2518 * UCLK: Min, as reported by PM FW, when available
2521 /* For set A set the correct latency values (i.e. non-dummy values) unconditionally
2523 context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
2524 context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
2525 context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
2527 context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2528 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2529 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2530 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2531 context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2532 context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2533 context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2534 context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2535 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2536 context->bw_ctx.bw.dcn.watermarks.a.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2539 /* Make set D = set A since we do not optimized watermarks for MALL */
2540 context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
2542 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
2543 if (!context->res_ctx.pipe_ctx[i].stream)
2546 pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
2547 pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
2549 if (dc->config.forced_clocks) {
2550 pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
2551 pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
2553 if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
2554 pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
2555 if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
2556 pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
2561 context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
2563 /* for proper prefetch calculations, if dummy lat > fclk lat, use fclk lat = dummy lat */
2564 if (need_fclk_lat_as_dummy)
2565 context->bw_ctx.dml.soc.fclk_change_latency_us =
2566 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
2568 dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
2571 /* Restore full p-state latency */
2572 context->bw_ctx.dml.soc.dram_clock_change_latency_us =
2573 dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
2575 /* revert fclk lat changes if required */
2576 if (need_fclk_lat_as_dummy)
2577 context->bw_ctx.dml.soc.fclk_change_latency_us =
2578 dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
2581 static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
2582 unsigned int *optimal_dcfclk,
2583 unsigned int *optimal_fclk)
2585 double bw_from_dram, bw_from_dram1, bw_from_dram2;
2587 bw_from_dram1 = uclk_mts * dcn3_2_soc.num_chans *
2588 dcn3_2_soc.dram_channel_width_bytes * (dcn3_2_soc.max_avg_dram_bw_use_normal_percent / 100);
2589 bw_from_dram2 = uclk_mts * dcn3_2_soc.num_chans *
2590 dcn3_2_soc.dram_channel_width_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100);
2592 bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
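/* The effective DRAM-limited bandwidth is the smaller of the DRAM-derated and
 * SDP-derated values; FCLK and DCFCLK then only need to be fast enough to move
 * that bandwidth across the fabric and the return bus.
 */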
2595 *optimal_fclk = bw_from_dram /
2596 (dcn3_2_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100));
2599 *optimal_dcfclk = bw_from_dram /
2600 (dcn3_2_soc.return_bus_width_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100));
2603 static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries,
2608 if (*num_entries == 0)
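/* Shift the remaining entries down by one and clear the now-unused last slot. */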
2611 for (i = index; i < *num_entries - 1; i++) {
2612 table[i] = table[i + 1];
2614 memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
2617 void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
2620 unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
2621 max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
2623 for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
2624 if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
2625 max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
2626 if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
2627 max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
2628 if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
2629 max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
2630 if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
2631 max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
2632 if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
2633 max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
2634 if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
2635 max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
2636 if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
2637 max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
2640	/* Scan through the clock values we currently have and, if any are 0,
2641	 * populate them with the dcn3_2_soc.clock_limits[] value.
2643	 * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK, as any of those being
2644	 * 0 will cause the clock table build to be skipped.
2646 if (max_dcfclk_mhz == 0)
2647 bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
2648 if (max_dispclk_mhz == 0)
2649 bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
2650 if (max_dtbclk_mhz == 0)
2651 bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz;
2652 if (max_uclk_mhz == 0)
2653 bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
2656 static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry,
2657 struct _vcs_dpi_voltage_scaling_st *second_entry)
2659 struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry;
2660 *first_entry = *second_entry;
2661 *second_entry = temp_entry;
2665 * sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK
2667 static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
2669 unsigned int start_index = 0;
2670 unsigned int end_index = 0;
2671 unsigned int current_bw = 0;
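/* Find each run of entries sharing the same net_bw_in_kbytes_sec and
 * bubble-sort that run by ascending dcfclk_mhz.
 */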
2673 for (int i = 0; i < (*num_entries - 1); i++) {
2674 if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
2675 current_bw = table[i].net_bw_in_kbytes_sec;
2679 while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw))
2683 if (start_index != end_index) {
2684 for (int j = start_index; j < end_index; j++) {
2685 for (int k = start_index; k < end_index; k++) {
2686 if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz)
2687 swap_table_entries(&table[k], &table[k+1]);
2699 * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing
2700 * and remove entries that do not
2702 static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
2704 for (int i = 0; i < (*num_entries - 1); i++) {
2705 if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
2706 if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) ||
2707 (table[i].fabricclk_mhz > table[i+1].fabricclk_mhz))
2708 remove_entry_from_table_at_index(table, num_entries, i);
2714 * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings
2716 * max_clk_limit - struct containing the desired clock timings
2718 * curr_clk_limit - struct containing the timings that need to be overwritten
2719 * Return: 0 upon success, non-zero for failure
2721 static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit,
2722 struct clk_limit_table_entry *curr_clk_limit)
2724 if (NULL == max_clk_limit || NULL == curr_clk_limit)
2725 return -1; //invalid parameters
2727 //only overwrite if desired max clock frequency is initialized
2728 if (max_clk_limit->dcfclk_mhz != 0)
2729 curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz;
2731 if (max_clk_limit->fclk_mhz != 0)
2732 curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz;
2734 if (max_clk_limit->memclk_mhz != 0)
2735 curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz;
2737 if (max_clk_limit->socclk_mhz != 0)
2738 curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz;
2740 if (max_clk_limit->dtbclk_mhz != 0)
2741 curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz;
2743 if (max_clk_limit->dispclk_mhz != 0)
2744 curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz;
2749 static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params,
2750 struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
2753 struct _vcs_dpi_voltage_scaling_st entry = {0};
2754 struct clk_limit_table_entry max_clk_data = {0};
2756 unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
2758 static const unsigned int num_dcfclk_stas = 5;
2759 unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
2761 unsigned int num_uclk_dpms = 0;
2762 unsigned int num_fclk_dpms = 0;
2763 unsigned int num_dcfclk_dpms = 0;
2765 unsigned int num_dc_uclk_dpms = 0;
2766 unsigned int num_dc_fclk_dpms = 0;
2767 unsigned int num_dc_dcfclk_dpms = 0;
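/* Single pass over the PMFW clock table: capture the max of each clock and
 * count the DPM levels, both in total and within the DC mode limits.
 */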
2769 for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
2770 if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz)
2771 max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
2772 if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz)
2773 max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
2774 if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz)
2775 max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
2776 if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz)
2777 max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
2778 if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz)
2779 max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
2780 if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz)
2781 max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
2782 if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz)
2783 max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
2785 if (bw_params->clk_table.entries[i].memclk_mhz > 0) {
2787 if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz)
2790 if (bw_params->clk_table.entries[i].fclk_mhz > 0) {
2792 if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz)
2795 if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) {
2797 if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz)
2798 num_dc_dcfclk_dpms++;
2802 if (!disable_dc_mode_overwrite) {
2803 //Overwrite max frequencies with max DC mode frequencies for DC mode systems
2804 override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data);
2805 num_uclk_dpms = num_dc_uclk_dpms;
2806 num_fclk_dpms = num_dc_fclk_dpms;
2807 num_dcfclk_dpms = num_dc_dcfclk_dpms;
2808 bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms;
2809 bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms;
2812 if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
2813 min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
2815 if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz)
2818 if (max_clk_data.dppclk_mhz == 0)
2819 max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz;
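/* If PMFW reported no FCLK levels, estimate FCLK from DCFCLK scaled by the
 * SDP-to-fabric efficiency ratio.
 */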
2821 if (max_clk_data.fclk_mhz == 0)
2822 max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz *
2823 dcn3_2_soc.pct_ideal_sdp_bw_after_urgent /
2824 dcn3_2_soc.pct_ideal_fabric_bw_after_urgent;
2826 if (max_clk_data.phyclk_mhz == 0)
2827 max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz;
2830 entry.dispclk_mhz = max_clk_data.dispclk_mhz;
2831 entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3;
2832 entry.dppclk_mhz = max_clk_data.dppclk_mhz;
2833 entry.dtbclk_mhz = max_clk_data.dtbclk_mhz;
2834 entry.phyclk_mhz = max_clk_data.phyclk_mhz;
2835 entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz;
2836 entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz;
2838 // Insert all the DCFCLK STAs
2839 for (i = 0; i < num_dcfclk_stas; i++) {
2840 entry.dcfclk_mhz = dcfclk_sta_targets[i];
2841 entry.fabricclk_mhz = 0;
2842 entry.dram_speed_mts = 0;
2844 get_optimal_ntuple(&entry);
2845 entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
2846 insert_entry_into_table_sorted(table, num_entries, &entry);
2849 // Insert the max DCFCLK
2850 entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
2851 entry.fabricclk_mhz = 0;
2852 entry.dram_speed_mts = 0;
2854 get_optimal_ntuple(&entry);
2855 entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
2856 insert_entry_into_table_sorted(table, num_entries, &entry);
2858 // Insert the UCLK DPMS
2859 for (i = 0; i < num_uclk_dpms; i++) {
2860 entry.dcfclk_mhz = 0;
2861 entry.fabricclk_mhz = 0;
2862 entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
2864 get_optimal_ntuple(&entry);
2865 entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
2866 insert_entry_into_table_sorted(table, num_entries, &entry);
2869 // If FCLK is coarse grained, insert individual DPMs.
2870 if (num_fclk_dpms > 2) {
2871 for (i = 0; i < num_fclk_dpms; i++) {
2872 entry.dcfclk_mhz = 0;
2873 entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
2874 entry.dram_speed_mts = 0;
2876 get_optimal_ntuple(&entry);
2877 entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
2878 insert_entry_into_table_sorted(table, num_entries, &entry);
2881 // If FCLK is fine grained, only insert max
2883 entry.dcfclk_mhz = 0;
2884 entry.fabricclk_mhz = max_clk_data.fclk_mhz;
2885 entry.dram_speed_mts = 0;
2887 get_optimal_ntuple(&entry);
2888 entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
2889 insert_entry_into_table_sorted(table, num_entries, &entry);
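/* The assumption above appears to be that with only one or two FCLK DPMs the
 * clock is effectively fine grained (intermediate frequencies are reachable),
 * so only the max FCLK needs to be a point of interest.
 */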
2892 // At this point, the table contains all "points of interest" based on
2893 // DPMs from PMFW, and STAs. Table is sorted by BW, and all clock
2894 // ratios (by derate) are exact.
2896 // Remove states that require higher clocks than are supported
2897 for (i = *num_entries - 1; i >= 0 ; i--) {
2898 if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz ||
2899 table[i].fabricclk_mhz > max_clk_data.fclk_mhz ||
2900 table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16)
2901 remove_entry_from_table_at_index(table, num_entries, i);
2904 // Insert entry with all max dc limits without bandwidth matching
2905 if (!disable_dc_mode_overwrite) {
2906 struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry;
2908 max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
2909 max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz;
2910 max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16;
2912 max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry);
2913 insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
2915 sort_entries_with_same_bw(table, num_entries);
2916 remove_inconsistent_entries(table, num_entries);
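/* The two calls above presumably order equal-bandwidth entries deterministically
 * and drop entries whose clocks are not monotonic with bandwidth, so the table
 * behaves like a proper set of voltage states.
 */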
2919 // At this point, the table only contains supported points of interest
2920 // it could be used as is, but some states may be redundant due to
2921 // the coarse grained nature of some clocks, so we want to round up to
2922 // coarse grained DPMs and remove duplicates.
2925 for (i = *num_entries - 1; i >= 0 ; i--) {
2926 for (j = 0; j < num_uclk_dpms; j++) {
2927 if (bw_params->clk_table.entries[j].memclk_mhz * 16 >= table[i].dram_speed_mts) {
2928 table[i].dram_speed_mts = bw_params->clk_table.entries[j].memclk_mhz * 16;
2934 // If FCLK is coarse grained, round up to next DPMs
2935 if (num_fclk_dpms > 2) {
2936 for (i = *num_entries - 1; i >= 0 ; i--) {
2937 for (j = 0; j < num_fclk_dpms; j++) {
2938 if (bw_params->clk_table.entries[j].fclk_mhz >= table[i].fabricclk_mhz) {
2939 table[i].fabricclk_mhz = bw_params->clk_table.entries[j].fclk_mhz;
2945 // Otherwise, round up to minimum.
2947 for (i = *num_entries - 1; i >= 0 ; i--) {
2948 if (table[i].fabricclk_mhz < min_fclk_mhz) {
2949 table[i].fabricclk_mhz = min_fclk_mhz;
2954 // Round DCFCLKs up to minimum
2955 for (i = *num_entries - 1; i >= 0 ; i--) {
2956 if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
2957 table[i].dcfclk_mhz = min_dcfclk_mhz;
2961 // Remove duplicate states, note duplicate states are always neighbouring since table is sorted.
2963 while (i < *num_entries - 1) {
2964 if (table[i].dcfclk_mhz == table[i + 1].dcfclk_mhz &&
2965 table[i].fabricclk_mhz == table[i + 1].fabricclk_mhz &&
2966 table[i].dram_speed_mts == table[i + 1].dram_speed_mts)
2967 remove_entry_from_table_at_index(table, num_entries, i + 1);
2972 // Fix up the state indices
2973 for (i = *num_entries - 1; i >= 0 ; i--) {
2974 table[i].state = i;
2981 * dcn32_update_bw_bounding_box
2983 * This overrides some dcn3_2 ip_or_soc initial parameters hardcoded from the
2984 * spreadsheet with actual values as per the dGPU SKU:
2985 * - with a few options passed in from dc->config
2986 * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
2987 * need to get it from PM FW)
2988 * - with latency values (in ns units) passed in dc->bb_overrides for
2989 * debugging purposes
2990 * - with latencies from VBIOS (in 100_ns units), if available for the given dGPU SKU
2992 * - with the number of DRAM channels from VBIOS (which differ for certain dGPU SKUs)
2994 * - with clock levels from the clk_table entries passed by Clk Mgr, as reported by PM
2995 * FW for the different clocks (which might differ for certain dGPU SKUs of the same ASIC)
2998 void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
3000 dc_assert_fp_enabled();
3002 /* Overrides from dc->config options */
3003 dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
3005 /* Override from passed dc->bb_overrides if available */
3006 if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
3007 && dc->bb_overrides.sr_exit_time_ns) {
3008 dc->dml2_options.bbox_overrides.sr_exit_latency_us =
3009 dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
3012 if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000)
3013 != dc->bb_overrides.sr_enter_plus_exit_time_ns
3014 && dc->bb_overrides.sr_enter_plus_exit_time_ns) {
3015 dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
3016 dcn3_2_soc.sr_enter_plus_exit_time_us =
3017 dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
3020 if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
3021 && dc->bb_overrides.urgent_latency_ns) {
3022 dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
3023 dc->dml2_options.bbox_overrides.urgent_latency_us =
3024 dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
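/* The same ns override drives urgent_latency_us, the pixel-data-only variant
 * and the DML2 bbox override, keeping all three inputs consistent.
 */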
3027 if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
3028 != dc->bb_overrides.dram_clock_change_latency_ns
3029 && dc->bb_overrides.dram_clock_change_latency_ns) {
3030 dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
3031 dcn3_2_soc.dram_clock_change_latency_us =
3032 dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
3035 if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
3036 != dc->bb_overrides.fclk_clock_change_latency_ns
3037 && dc->bb_overrides.fclk_clock_change_latency_ns) {
3038 dc->dml2_options.bbox_overrides.fclk_change_latency_us =
3039 dcn3_2_soc.fclk_change_latency_us =
3040 dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
3043 if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
3044 != dc->bb_overrides.dummy_clock_change_latency_ns
3045 && dc->bb_overrides.dummy_clock_change_latency_ns) {
3046 dcn3_2_soc.dummy_pstate_latency_us =
3047 dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
3050 /* Override from VBIOS if VBIOS bb_info available */
3051 if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
3052 struct bp_soc_bb_info bb_info = {0};
3054 if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
3055 if (bb_info.dram_clock_change_latency_100ns > 0)
3056 dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
3057 dcn3_2_soc.dram_clock_change_latency_us =
3058 bb_info.dram_clock_change_latency_100ns * 10;
3060 if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
3061 dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
3062 dcn3_2_soc.sr_enter_plus_exit_time_us =
3063 bb_info.dram_sr_enter_exit_latency_100ns * 10;
3065 if (bb_info.dram_sr_exit_latency_100ns > 0)
3066 dc->dml2_options.bbox_overrides.sr_exit_latency_us =
3067 dcn3_2_soc.sr_exit_time_us =
3068 bb_info.dram_sr_exit_latency_100ns * 10;
3072 /* Override from VBIOS for num_chan */
3073 if (dc->ctx->dc_bios->vram_info.num_chans) {
3074 dc->dml2_options.bbox_overrides.dram_num_chan =
3075 dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
3076 dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
3077 dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
3080 if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
3081 dc->dml2_options.bbox_overrides.dram_chanel_width_bytes =
3082 dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
3084 /* DML DSC delay factor workaround */
3085 dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
3087 dcn3_2_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0;
3089 /* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
3090 dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
3091 dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
3092 dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
3093 dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0;
3094 dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
3095 dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0;
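/* Clk Mgr and VBIOS report these frequencies in kHz; DML expects MHz, hence
 * the /1000.0 conversions above.
 */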
3097 /* Overrides Clock levels from Clk Mgr table entries as reported by PM FW */
3098 if (bw_params->clk_table.entries[0].memclk_mhz) {
3099 if (dc->debug.use_legacy_soc_bb_mechanism) {
3100 unsigned int i = 0, j = 0, num_states = 0;
3102 unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
3103 unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
3104 unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
3105 unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
3106 unsigned int min_dcfclk = UINT_MAX;
3107 /* Set 199 as first value in STA target array to have a minimum DCFCLK value.
3108 * For DCN32 we set min to 199 so the minimum FCLK DPM0 (300 MHz) can be achieved */
3109 unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
3110 unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0;
3111 unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
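/* Legacy path: scan the PMFW clock table for min/max clocks, then merge the
 * fixed DCFCLK STA targets with the UCLK DPMs: an optimal DCFCLK is computed
 * for each UCLK level, a UCLK is chosen for each STA target, and the two
 * lists are interleaved (capped at DC__VOLTAGE_STATES) to form clock_limits.
 */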
3113 for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
3114 if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
3115 max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
3116 if (bw_params->clk_table.entries[i].dcfclk_mhz != 0 &&
3117 bw_params->clk_table.entries[i].dcfclk_mhz < min_dcfclk)
3118 min_dcfclk = bw_params->clk_table.entries[i].dcfclk_mhz;
3119 if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
3120 max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
3121 if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
3122 max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
3123 if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
3124 max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
3126 if (min_dcfclk > dcfclk_sta_targets[0])
3127 dcfclk_sta_targets[0] = min_dcfclk;
3128 if (!max_dcfclk_mhz)
3129 max_dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
3130 if (!max_dispclk_mhz)
3131 max_dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
3132 if (!max_dppclk_mhz)
3133 max_dppclk_mhz = dcn3_2_soc.clock_limits[0].dppclk_mhz;
3134 if (!max_phyclk_mhz)
3135 max_phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz;
3137 if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3138 // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
3139 dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
3140 num_dcfclk_sta_targets++;
3141 } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3142 // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
3143 for (i = 0; i < num_dcfclk_sta_targets; i++) {
3144 if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
3145 dcfclk_sta_targets[i] = max_dcfclk_mhz;
3149 // Update size of array since we "removed" duplicates
3150 num_dcfclk_sta_targets = i + 1;
3153 num_uclk_states = bw_params->clk_table.num_entries;
3155 // Calculate optimal dcfclk for each uclk
3156 for (i = 0; i < num_uclk_states; i++) {
3157 dcn32_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
3158 &optimal_dcfclk_for_uclk[i], NULL);
3159 if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
3160 optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
3164 // Calculate optimal uclk for each dcfclk sta target
3165 for (i = 0; i < num_dcfclk_sta_targets; i++) {
3166 for (j = 0; j < num_uclk_states; j++) {
3167 if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
3168 optimal_uclk_for_dcfclk_sta_targets[i] =
3169 bw_params->clk_table.entries[j].memclk_mhz * 16;
3177 // create the final dcfclk and uclk table
3178 while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
3179 if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
3180 dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
3181 dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
3183 if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
3184 dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
3185 dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
3187 j = num_uclk_states;
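/* Append any remaining DCFCLK STA targets, then any remaining UCLK DPMs that
 * still fit under max DCFCLK, bounded by DC__VOLTAGE_STATES.
 */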
3192 while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
3193 dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
3194 dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
3197 while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
3198 optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
3199 dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
3200 dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
3203 dcn3_2_soc.num_states = num_states;
3204 for (i = 0; i < dcn3_2_soc.num_states; i++) {
3205 dcn3_2_soc.clock_limits[i].state = i;
3206 dcn3_2_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
3207 dcn3_2_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
3209 /* Fill all states with max values of all these clocks */
3210 dcn3_2_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
3211 dcn3_2_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
3212 dcn3_2_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
3213 dcn3_2_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3;
3215 /* Populate from bw_params for DTBCLK, SOCCLK */
3216 if (i > 0) {
3217 if (!bw_params->clk_table.entries[i].dtbclk_mhz) {
3218 dcn3_2_soc.clock_limits[i].dtbclk_mhz = dcn3_2_soc.clock_limits[i-1].dtbclk_mhz;
3219 } else {
3220 dcn3_2_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
3221 }
3222 } else if (bw_params->clk_table.entries[i].dtbclk_mhz) {
3223 dcn3_2_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
3226 if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
3227 dcn3_2_soc.clock_limits[i].socclk_mhz = dcn3_2_soc.clock_limits[i-1].socclk_mhz;
3228 else
3229 dcn3_2_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
3231 if (!dram_speed_mts[i] && i > 0)
3232 dcn3_2_soc.clock_limits[i].dram_speed_mts = dcn3_2_soc.clock_limits[i-1].dram_speed_mts;
3233 else
3234 dcn3_2_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
3236 /* These clocks cannot come from bw_params, always fill from dcn3_2_soc[0] */
3237 /* PHYCLK_D18, PHYCLK_D32 */
3238 dcn3_2_soc.clock_limits[i].phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz;
3239 dcn3_2_soc.clock_limits[i].phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz;
3242 build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params,
3243 dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states);
3246 /* Re-init DML with updated bb */
3247 dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
3248 if (dc->current_state)
3249 dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
3252 if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) {
3255 dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries;
3257 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
3258 dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels;
3260 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
3261 dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels;
3263 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
3264 dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
3266 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
3267 dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels;
3269 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
3270 dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels;
3272 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
3273 dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
3275 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
3276 dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
3278 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) {
3279 if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz)
3280 dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
3281 dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz;
3284 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) {
3285 if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz)
3286 dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
3287 dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz;
3290 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
3291 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
3292 dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
3293 dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
3296 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) {
3297 if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz)
3298 dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
3299 dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz;
3302 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) {
3303 if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz)
3304 dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
3305 dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz;
3308 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) {
3309 if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) {
3310 dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
3311 dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
3312 dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
3313 dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
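/* DPPCLK levels are not copied from a separate table; they mirror the
 * reported DISPCLK levels above.
 */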
3319 void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
3322 dc_assert_fp_enabled();
3324 pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
3325 pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
3328 bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
3331 uint32_t refresh_rate = 0;
3332 uint32_t min_refresh = subvp_active_margin_list.min_refresh;
3333 uint32_t max_refresh = subvp_active_margin_list.max_refresh;
3336 for (i = 0; i < SUBVP_ACTIVE_MARGIN_LIST_LEN; i++) {
3337 uint32_t width = subvp_active_margin_list.res[i].width;
3338 uint32_t height = subvp_active_margin_list.res[i].height;
3340 refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
3341 pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
3342 refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
3343 refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
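/* The three statements above compute refresh = ceil(pix_clk_hz / (h_total * v_total)).
 * For example, a 594 MHz pixel clock with a 4400 x 2250 total timing yields 60 Hz.
 */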
3345 if (refresh_rate >= min_refresh && refresh_rate <= max_refresh &&
3346 dcn32_check_native_scaling_for_res(pipe, width, height)) {
3355 * dcn32_allow_subvp_high_refresh_rate: Determine if the high refresh rate config will allow subvp
3357 * @dc: Current DC state
3358 * @context: New DC state to be programmed
3359 * @pipe: Pipe to be considered for use in subvp
3361 * On high refresh rate display configs, we will allow subvp under the following conditions:
3362 * 1. Resolution is 3840x2160, 3440x1440, or 2560x1440
3363 * 2. Refresh rate is between 120hz and 165hz
3364 * 3. No scaling
3365 * 4. Freesync is inactive
3366 * 5. For single display cases, freesync must be disabled
3368 * Return: True if pipe can be used for subvp, false otherwise
3370 bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe)
3373 uint32_t refresh_rate = 0;
3374 uint32_t subvp_min_refresh = subvp_high_refresh_list.min_refresh;
3375 uint32_t subvp_max_refresh = subvp_high_refresh_list.max_refresh;
3376 uint32_t min_refresh = subvp_max_refresh;
3379 /* Only allow SubVP on high refresh displays if all connected displays
3380 * are considered "high refresh" (i.e. >= 120hz). We do not want to
3381 * allow combinations such as 120hz (SubVP) + 60hz (SubVP).
3383 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3384 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3386 if (!pipe_ctx->stream)
3388 refresh_rate = (pipe_ctx->stream->timing.pix_clk_100hz * 100 +
3389 pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total - 1)
3390 / (double)(pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total);
3392 if (refresh_rate < min_refresh)
3393 min_refresh = refresh_rate;
3396 if (!dc->debug.disable_subvp_high_refresh && min_refresh >= subvp_min_refresh && pipe->stream &&
3397 pipe->plane_state && !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) {
3398 refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
3399 pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
3400 / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
3401 if (refresh_rate >= subvp_min_refresh && refresh_rate <= subvp_max_refresh) {
3402 for (i = 0; i < SUBVP_HIGH_REFRESH_LIST_LEN; i++) {
3403 uint32_t width = subvp_high_refresh_list.res[i].width;
3404 uint32_t height = subvp_high_refresh_list.res[i].height;
3406 if (dcn32_check_native_scaling_for_res(pipe, width, height)) {
3407 if ((context->stream_count == 1 && !pipe->stream->allow_freesync) || context->stream_count > 1) {
3419 * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy
3421 * @dc: Current DC state
3422 * @context: New DC state to be programmed
3424 * Return: Max vratio for prefetch
3426 double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context)
3428 double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4
3431 /* For single display MPO configs, allow the max vratio to be 8
3432 * if any plane is YUV420 format
3434 if (context->stream_count == 1 && context->stream_status[0].plane_count > 1) {
3435 for (i = 0; i < context->stream_status[0].plane_count; i++) {
3436 if (context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr ||
3437 context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb) {
3438 max_vratio_pre = __DML_MAX_VRATIO_PRE__;
3442 return max_vratio_pre;
3446 * dcn32_assign_fpo_vactive_candidate - Assign the FPO stream candidate for FPO + VActive case
3448 * This function chooses the FPO candidate stream for FPO + VActive cases (2 stream config).
3449 * For FPO + VActive cases, the assumption is that one display has ActiveMargin > 0, and the
3450 * other display has ActiveMargin <= 0. This function will choose the pipe/stream that has
3451 * ActiveMargin <= 0 to be the FPO stream candidate if found.
3454 * @dc: current dc state
3455 * @context: new dc state
3456 * @fpo_candidate_stream: pointer to FPO stream candidate if one is found
3460 void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *context, struct dc_stream_state **fpo_candidate_stream)
3462 unsigned int i, pipe_idx;
3463 const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
3465 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
3466 const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3468 /* In DCN32/321, FPO uses per-pipe P-State force.
3469 * If there are no planes, HUBP is power gated and
3470 * therefore programming UCLK_PSTATE_FORCE does
3471 * nothing (P-State will always be asserted naturally
3472 * on a pipe that has HUBP power gated). Therefore we
3473 * only want to enable FPO if the FPO pipe has both
3474 * a stream and a plane.
3476 if (!pipe->stream || !pipe->plane_state)
3479 if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
3480 *fpo_candidate_stream = pipe->stream;
3488 * dcn32_find_vactive_pipe - Determines if the config has a pipe that can switch in VACTIVE
3490 * @dc: current dc state
3491 * @context: new dc state
3492 * @vactive_margin_req_us: The vactive margin required for a vactive pipe to be considered "found"
3494 * Return: True if VACTIVE display is found, false otherwise
3496 bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req_us)
3498 unsigned int i, pipe_idx;
3499 const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
3500 bool vactive_found = false;
3501 unsigned int blank_us = 0;
3503 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
3504 const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3509 blank_us = ((pipe->stream->timing.v_total - pipe->stream->timing.v_addressable) * pipe->stream->timing.h_total /
3510 (double)(pipe->stream->timing.pix_clk_100hz * 100)) * 1000000;
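/* blank_us approximates the vertical blank duration in microseconds; pipes whose
 * blank exceeds debug.fpo_vactive_max_blank_us are not considered for FPO + Vactive.
 */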
3511 if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us &&
3512 !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed) && blank_us < dc->debug.fpo_vactive_max_blank_us) {
3513 vactive_found = true;
3518 return vactive_found;
3521 void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
3523 dc_assert_fp_enabled();
3524 dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0;
3527 void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
3529 // WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
3530 if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
3531 dc->dml.soc.num_chans <= 8) {
3532 int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
3534 if (context->bw_ctx.dml.vba.DRAMSpeed <= dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16 &&
3535 num_mclk_levels > 1) {
3536 context->bw_ctx.dml.vba.DRAMSpeed = dc->clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
3537 context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
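/* DRAMSpeed is in MT/s (memclk_mhz * 16), so dramclk_khz is derived back as
 * DRAMSpeed * 1000 / 16; bumping to at least DPM1 keeps FPO/SubVP off the
 * strobe memory mode.
 */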