2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
54 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dce/dmub_outbox.h"
57 #include "inc/dc_link_dp.h"
58 #include "inc/link_dpcd.h"
60 #define DC_LOGGER_INIT(logger)
/* Expand a register field name into its (shift, mask) pair from the hwseq tables. */
68 #define FN(reg_name, field_name) \
69 hws->shifts->field_name, hws->masks->field_name
71 /*print is 17 wide, first two characters are spaces*/
/* Log a DCHUB ref-clock cycle count as microseconds (see print_microsec). */
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 print_microsec(dc_ctx, log_ctx, ref_cycle)
75 #define GAMMA_HW_POINTS_NUM 256
/* PGFSM (power-gating state machine) status values polled via REG_WAIT below. */
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
/*
 * Convert a DCHUB reference-clock cycle count into microseconds and append it
 * to the debug log as a fixed-width "<us>.<frac>" column (17 chars wide, per
 * the DTN_INFO_MICRO_SEC comment above).
 */
80 static void print_microsec(struct dc_context *dc_ctx,
81 struct dc_log_buffer_ctx *log_ctx,
/* ref clock is stored in kHz; convert to MHz for the cycles->us division */
84 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
/* scale by 1000 to keep three fractional digits in integer math */
85 static const unsigned int frac = 1000;
86 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
88 DTN_INFO(" %11d.%03d",
/*
 * Lock or unlock every top-level pipe in @context via pipe_control_lock().
 * Bottom pipes share a TG with their top pipe, and disabled TGs need no lock,
 * so both are skipped to avoid redundant (un)locking.
 */
93 void dcn10_lock_all_pipes(struct dc *dc,
94 struct dc_state *context,
97 struct pipe_ctx *pipe_ctx;
98 struct timing_generator *tg;
101 for (i = 0; i < dc->res_pool->pipe_count; i++) {
102 pipe_ctx = &context->res_ctx.pipe_ctx[i];
103 tg = pipe_ctx->stream_res.tg;
106 * Only lock the top pipe's tg to prevent redundant
107 * (un)locking. Also skip if pipe is disabled.
109 if (pipe_ctx->top_pipe ||
111 !tg->funcs->is_tg_enabled(tg))
115 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
117 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
/*
 * Dump the MPC and DPP CRC result registers to the debug log.
 * Each read is guarded by REG() so ASICs lacking the register skip it.
 */
121 static void log_mpc_crc(struct dc *dc,
122 struct dc_log_buffer_ctx *log_ctx)
124 struct dc_context *dc_ctx = dc->ctx;
125 struct dce_hwseq *hws = dc->hwseq;
127 if (REG(MPC_CRC_RESULT_GB))
128 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
129 REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
130 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
131 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
132 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
/*
 * Read the four HUBBUB watermark sets from hardware and log each watermark
 * (urgent, self-refresh enter/exit, DRAM clock change) in microseconds.
 */
135 static void dcn10_log_hubbub_state(struct dc *dc,
136 struct dc_log_buffer_ctx *log_ctx)
138 struct dc_context *dc_ctx = dc->ctx;
139 struct dcn_hubbub_wm wm;
142 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
143 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
145 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
146 " sr_enter sr_exit dram_clk_change\n");
/* hardware exposes four watermark sets (A-D) */
148 for (i = 0; i < 4; i++) {
149 struct dcn_hubbub_wm_set *s;
152 DTN_INFO("WM_Set[%d]:", s->wm_set);
153 DTN_INFO_MICRO_SEC(s->data_urgent);
154 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
155 DTN_INFO_MICRO_SEC(s->sr_enter);
156 DTN_INFO_MICRO_SEC(s->sr_exit);
/* NOTE(review): "chanage" is a typo inherited from the struct field name
 * in dcn_hubbub_wm_set — renaming requires touching the struct definition,
 * not just this call site.
 */
157 DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
/*
 * Dump per-HUBP hardware state to the debug log, one table per register
 * group: surface config, RQ (request), DLG (delay generator) and TTU
 * (time-to-urgency) registers, read back from hardware for every pipe.
 */
164 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
166 struct dc_context *dc_ctx = dc->ctx;
167 struct resource_pool *pool = dc->res_pool;
171 "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
172 for (i = 0; i < pool->pipe_count; i++) {
173 struct hubp *hubp = pool->hubps[i];
174 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
/* refresh the cached state from hardware before printing */
176 hubp->funcs->hubp_read_state(hubp);
179 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
192 s->underflow_status);
193 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
194 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
195 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
200 DTN_INFO("\n=========RQ========\n");
201 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
202 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
203 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
204 for (i = 0; i < pool->pipe_count; i++) {
205 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
206 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
209 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
210 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
211 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
212 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
213 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
214 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
215 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
216 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
217 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
218 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
221 DTN_INFO("========DLG========\n");
222 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
223 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
224 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
225 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
226 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
227 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
228 " x_rp_dlay x_rr_sfl\n");
229 for (i = 0; i < pool->pipe_count; i++) {
230 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
231 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
234 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
235 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
236 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
237 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
238 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
239 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
240 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
241 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
242 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
243 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
244 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
245 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
246 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
247 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
248 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
249 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
250 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
251 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
252 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
253 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
254 dlg_regs->xfc_reg_remote_surface_flip_latency);
257 DTN_INFO("========TTU========\n");
258 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
259 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
260 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
261 for (i = 0; i < pool->pipe_count; i++) {
262 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
263 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
266 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
267 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
268 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
269 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
270 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
271 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
272 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
273 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
/*
 * Dump the full display hardware state to the debug log: HUBBUB watermarks,
 * HUBP registers, DPP gamma/gamut config, MPCC tree, OTG timing, DSC,
 * stream/link encoders, calculated clocks, CRCs and (when present) DP HPO
 * stream/link encoders. Intended for debug/diagnostics paths only.
 */
278 void dcn10_log_hw_state(struct dc *dc,
279 struct dc_log_buffer_ctx *log_ctx)
281 struct dc_context *dc_ctx = dc->ctx;
282 struct resource_pool *pool = dc->res_pool;
287 dcn10_log_hubbub_state(dc, log_ctx);
289 dcn10_log_hubp_states(dc, log_ctx);
291 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
292 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
293 "C31 C32 C33 C34\n");
294 for (i = 0; i < pool->pipe_count; i++) {
295 struct dpp *dpp = pool->dpps[i];
296 struct dcn_dpp_state s = {0};
298 dpp->funcs->dpp_read_state(dpp, &s);
/* decode LUT mode enums into human-readable strings */
303 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
304 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
307 (s.igam_lut_mode == 0) ? "BypassFixed" :
308 ((s.igam_lut_mode == 1) ? "BypassFloat" :
309 ((s.igam_lut_mode == 2) ? "RAM" :
310 ((s.igam_lut_mode == 3) ? "RAM" :
312 (s.dgam_lut_mode == 0) ? "Bypass" :
313 ((s.dgam_lut_mode == 1) ? "sRGB" :
314 ((s.dgam_lut_mode == 2) ? "Ycc" :
315 ((s.dgam_lut_mode == 3) ? "RAM" :
316 ((s.dgam_lut_mode == 4) ? "RAM" :
318 (s.rgam_lut_mode == 0) ? "Bypass" :
319 ((s.rgam_lut_mode == 1) ? "sRGB" :
320 ((s.rgam_lut_mode == 2) ? "Ycc" :
321 ((s.rgam_lut_mode == 3) ? "RAM" :
322 ((s.rgam_lut_mode == 4) ? "RAM" :
325 s.gamut_remap_c11_c12,
326 s.gamut_remap_c13_c14,
327 s.gamut_remap_c21_c22,
328 s.gamut_remap_c23_c24,
329 s.gamut_remap_c31_c32,
330 s.gamut_remap_c33_c34);
335 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
336 for (i = 0; i < pool->pipe_count; i++) {
337 struct mpcc_state s = {0};
339 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
341 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
342 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
343 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
348 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
350 for (i = 0; i < pool->timing_generator_count; i++) {
351 struct timing_generator *tg = pool->timing_generators[i];
352 struct dcn_otg_state s = {0};
353 /* Read shared OTG state registers for all DCNx */
354 optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
357 * For DCN2 and greater, a register on the OPP is used to
358 * determine if the CRTC is blanked instead of the OTG. So use
359 * dpg_is_blanked() if exists, otherwise fallback on otg.
361 * TODO: Implement DCN-specific read_otg_state hooks.
363 if (pool->opps[i]->funcs->dpg_is_blanked)
364 s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
366 s.blank_enabled = tg->funcs->is_blanked(tg);
368 //only print if OTG master is enabled
369 if ((s.otg_enabled & 1) == 0)
372 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
390 s.underflow_occurred_status,
393 // Clear underflow for debug purposes
394 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
395 // This function is called only from Windows or Diags test environment, hence it's safe to clear
396 // it from here without affecting the original intent.
397 tg->funcs->clear_optc_underflow(tg);
401 // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
402 // TODO: Update golden log header to reflect this name change
403 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
404 for (i = 0; i < pool->res_cap->num_dsc; i++) {
405 struct display_stream_compressor *dsc = pool->dscs[i];
406 struct dcn_dsc_state s = {0};
408 dsc->funcs->dsc_read_state(dsc, &s);
409 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
413 s.dsc_bits_per_pixel);
418 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
419 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
420 for (i = 0; i < pool->stream_enc_count; i++) {
421 struct stream_encoder *enc = pool->stream_enc[i];
422 struct enc_state s = {0};
424 if (enc->funcs->enc_read_state) {
425 enc->funcs->enc_read_state(enc, &s);
426 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
429 s.sec_gsp_pps_line_num,
430 s.vbid6_line_reference,
432 s.sec_gsp_pps_enable,
433 s.sec_stream_enable);
439 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
440 for (i = 0; i < dc->link_count; i++) {
441 struct link_encoder *lenc = dc->links[i]->link_enc;
443 struct link_enc_state s = {0};
445 if (lenc->funcs->read_state) {
446 lenc->funcs->read_state(lenc, &s);
447 DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
450 s.dphy_fec_ready_shadow,
451 s.dphy_fec_active_status,
452 s.dp_link_training_complete);
458 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
459 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
460 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
461 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
462 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
463 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
464 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
465 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
466 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
468 log_mpc_crc(dc, log_ctx);
/* DP HPO (128b/132b) encoder sections only exist on ASICs that have them */
471 if (pool->hpo_dp_stream_enc_count > 0) {
472 DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
473 for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
474 struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
475 struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
477 if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
478 hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
480 DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
481 hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
482 hpo_dp_se_state.stream_enc_enabled,
483 hpo_dp_se_state.otg_inst,
484 (hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
485 ((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
486 (hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
487 (hpo_dp_se_state.component_depth == 0) ? 6 :
488 ((hpo_dp_se_state.component_depth == 1) ? 8 :
489 (hpo_dp_se_state.component_depth == 2) ? 10 : 12),
490 hpo_dp_se_state.vid_stream_enabled,
491 hpo_dp_se_state.sdp_enabled,
492 hpo_dp_se_state.compressed_format,
493 hpo_dp_se_state.mapped_to_link_enc);
500 /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
501 if (pool->hpo_dp_link_enc_count) {
502 DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
504 for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
505 struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
506 struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
508 if (hpo_dp_link_enc->funcs->read_state) {
509 hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
510 DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
511 hpo_dp_link_enc->inst,
512 hpo_dp_le_state.link_enc_enabled,
513 (hpo_dp_le_state.link_mode == 0) ? "TPS1" :
514 (hpo_dp_le_state.link_mode == 1) ? "TPS2" :
515 (hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
516 hpo_dp_le_state.lane_count,
517 hpo_dp_le_state.stream_src[0],
518 hpo_dp_le_state.slot_count[0],
519 hpo_dp_le_state.vc_rate_x[0],
520 hpo_dp_le_state.vc_rate_y[0]);
/*
 * Check (and clear) underflow status on both the pipe's OTG and HUBP.
 * Clearing here means the status is consume-once: a second call will not
 * report the same underflow event again.
 */
532 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
534 struct hubp *hubp = pipe_ctx->plane_res.hubp;
535 struct timing_generator *tg = pipe_ctx->stream_res.tg;
537 if (tg->funcs->is_optc_underflow_occurred(tg)) {
538 tg->funcs->clear_optc_underflow(tg);
542 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
543 hubp->funcs->hubp_clear_underflow(hubp);
/*
 * Configure the POWER_FORCEON bit for all plane power domains.
 * force_on == true keeps every domain forced on, i.e. power gating is
 * effectively disabled; even-numbered domains are set first, then odd.
 */
549 void dcn10_enable_power_gating_plane(
550 struct dce_hwseq *hws,
553 bool force_on = true; /* disable power gating */
559 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
560 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
561 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
562 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
565 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
566 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
567 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
568 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
/*
 * Take all four display pipes out of legacy VGA mode.
 * If no pipe is in VGA mode there is nothing to do; otherwise clear all
 * D*VGA_CONTROL registers and run the VGA_TEST render sequence so DCHUBP
 * timing is updated correctly (see HW engineer's note below).
 */
571 void dcn10_disable_vga(
572 struct dce_hwseq *hws)
574 unsigned int in_vga1_mode = 0;
575 unsigned int in_vga2_mode = 0;
576 unsigned int in_vga3_mode = 0;
577 unsigned int in_vga4_mode = 0;
579 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
580 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
581 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
582 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
/* nothing to disable if no pipe is currently in VGA mode */
584 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
585 in_vga3_mode == 0 && in_vga4_mode == 0)
588 REG_WRITE(D1VGA_CONTROL, 0);
589 REG_WRITE(D2VGA_CONTROL, 0);
590 REG_WRITE(D3VGA_CONTROL, 0);
591 REG_WRITE(D4VGA_CONTROL, 0);
593 /* HW Engineer's Notes:
594 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
595 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
597 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
598 * VGA_TEST_ENABLE, to leave it in the same state as before.
600 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
601 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
605 * dcn10_dpp_pg_control - DPP power gate control.
607 * @hws: dce_hwseq reference.
608 * @dpp_inst: DPP instance reference.
609 * @power_on: true to power the DPP on (power gate released), false to gate it.
611 * Enable or disable power gate in the specific DPP instance. Note the
611 * inversion below: power_on == true programs POWER_GATE = 0 and waits for
611 * PGFSM_POWER_ON status.
613 void dcn10_dpp_pg_control(
614 struct dce_hwseq *hws,
615 unsigned int dpp_inst,
618 uint32_t power_gate = power_on ? 0 : 1;
619 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
/* honor debug override; also bail if this ASIC has no DPP PG registers */
621 if (hws->ctx->dc->debug.disable_dpp_power_gate)
623 if (REG(DOMAIN1_PG_CONFIG) == 0)
/* DPP instances map to the odd-numbered power domains (1/3/5/7) */
628 REG_UPDATE(DOMAIN1_PG_CONFIG,
629 DOMAIN1_POWER_GATE, power_gate);
631 REG_WAIT(DOMAIN1_PG_STATUS,
632 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
636 REG_UPDATE(DOMAIN3_PG_CONFIG,
637 DOMAIN3_POWER_GATE, power_gate);
639 REG_WAIT(DOMAIN3_PG_STATUS,
640 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
644 REG_UPDATE(DOMAIN5_PG_CONFIG,
645 DOMAIN5_POWER_GATE, power_gate);
647 REG_WAIT(DOMAIN5_PG_STATUS,
648 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
652 REG_UPDATE(DOMAIN7_PG_CONFIG,
653 DOMAIN7_POWER_GATE, power_gate);
655 REG_WAIT(DOMAIN7_PG_STATUS,
656 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
666 * dcn10_hubp_pg_control - HUBP power gate control.
668 * @hws: dce_hwseq reference.
669 * @hubp_inst: HUBP instance reference.
670 * @power_on: true to power the HUBP on (power gate released), false to gate it.
672 * Enable or disable power gate in the specific HUBP instance. As in
672 * dcn10_dpp_pg_control(), power_on == true programs POWER_GATE = 0 and
672 * waits for PGFSM_POWER_ON status.
674 void dcn10_hubp_pg_control(
675 struct dce_hwseq *hws,
676 unsigned int hubp_inst,
679 uint32_t power_gate = power_on ? 0 : 1;
680 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
/* honor debug override; also bail if this ASIC has no HUBP PG registers */
682 if (hws->ctx->dc->debug.disable_hubp_power_gate)
684 if (REG(DOMAIN0_PG_CONFIG) == 0)
/* HUBP instances map to the even-numbered power domains (0/2/4/6) */
688 case 0: /* DCHUBP0 */
689 REG_UPDATE(DOMAIN0_PG_CONFIG,
690 DOMAIN0_POWER_GATE, power_gate);
692 REG_WAIT(DOMAIN0_PG_STATUS,
693 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
696 case 1: /* DCHUBP1 */
697 REG_UPDATE(DOMAIN2_PG_CONFIG,
698 DOMAIN2_POWER_GATE, power_gate);
700 REG_WAIT(DOMAIN2_PG_STATUS,
701 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
704 case 2: /* DCHUBP2 */
705 REG_UPDATE(DOMAIN4_PG_CONFIG,
706 DOMAIN4_POWER_GATE, power_gate);
708 REG_WAIT(DOMAIN4_PG_STATUS,
709 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
712 case 3: /* DCHUBP3 */
713 REG_UPDATE(DOMAIN6_PG_CONFIG,
714 DOMAIN6_POWER_GATE, power_gate);
716 REG_WAIT(DOMAIN6_PG_STATUS,
717 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
/*
 * Un-gate the front-end (DPP + HUBP) power domains for @plane_id.
 * DC_IP_REQUEST_CNTL is toggled around the PG calls to open/close the
 * register-programming window required by the PG state machine.
 */
726 static void power_on_plane(
727 struct dce_hwseq *hws,
730 DC_LOGGER_INIT(hws->ctx->logger);
731 if (REG(DC_IP_REQUEST_CNTL)) {
732 REG_SET(DC_IP_REQUEST_CNTL, 0,
735 if (hws->funcs.dpp_pg_control)
736 hws->funcs.dpp_pg_control(hws, plane_id, true);
738 if (hws->funcs.hubp_pg_control)
739 hws->funcs.hubp_pg_control(hws, plane_id, true);
741 REG_SET(DC_IP_REQUEST_CNTL, 0,
744 "Un-gated front end for pipe %d\n", plane_id);
/*
 * Revert the DEGVIDCN10_253 workaround: blank HUBP0 again and power-gate
 * it back off. No-op if the workaround was never applied
 * (see apply_DEGVIDCN10_253_wa below).
 */
748 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
750 struct dce_hwseq *hws = dc->hwseq;
751 struct hubp *hubp = dc->res_pool->hubps[0];
753 if (!hws->wa_state.DEGVIDCN10_253_applied)
756 hubp->funcs->set_blank(hubp, true);
758 REG_SET(DC_IP_REQUEST_CNTL, 0,
761 hws->funcs.hubp_pg_control(hws, 0, false);
762 REG_SET(DC_IP_REQUEST_CNTL, 0,
765 hws->wa_state.DEGVIDCN10_253_applied = false;
/*
 * DEGVIDCN10_253 workaround: when every pipe is power gated, stutter cannot
 * engage; power HUBP0 back on (unblanked) so stutter works. Skipped when
 * stutter is debug-disabled or the workaround flag is not set for this ASIC.
 */
768 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
770 struct dce_hwseq *hws = dc->hwseq;
771 struct hubp *hubp = dc->res_pool->hubps[0];
774 if (dc->debug.disable_stutter)
777 if (!hws->wa.DEGVIDCN10_253)
/* only applies when every HUBP is power gated */
780 for (i = 0; i < dc->res_pool->pipe_count; i++) {
781 if (!dc->res_pool->hubps[i]->power_gated)
785 /* all pipe power gated, apply work around to enable stutter. */
787 REG_SET(DC_IP_REQUEST_CNTL, 0,
790 hws->funcs.hubp_pg_control(hws, 0, true);
791 REG_SET(DC_IP_REQUEST_CNTL, 0,
794 hubp->funcs->set_hubp_blank_en(hubp, false);
795 hws->wa_state.DEGVIDCN10_253_applied = true;
/*
 * Run the VBIOS golden init sequence: global DCN init followed by per-pipe
 * disable via the BIOS command table. Also restores
 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE if the command table flipped
 * it (S0i3 resume workaround, see comment below).
 */
798 void dcn10_bios_golden_init(struct dc *dc)
800 struct dce_hwseq *hws = dc->hwseq;
801 struct dc_bios *bp = dc->ctx->dc_bios;
803 bool allow_self_fresh_force_enable = true;
/* some platforms handle golden init in the s0i3 workaround instead */
805 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
/* snapshot the self-refresh force-enable state before the command table runs */
808 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
809 allow_self_fresh_force_enable =
810 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
813 /* WA for making DF sleep when idle after resume from S0i3.
814 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
815 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
816 * before calling command table and it changed to 1 after,
817 * it should be set back to 0.
820 /* initialize dcn global */
821 bp->funcs->enable_disp_power_gating(bp,
822 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
824 for (i = 0; i < dc->res_pool->pipe_count; i++) {
825 /* initialize dcn per pipe */
826 bp->funcs->enable_disp_power_gating(bp,
827 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
830 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
831 if (allow_self_fresh_force_enable == false &&
832 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
833 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
834 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
/*
 * Workaround for a false OPTC underflow indication: wait for MPCC
 * disconnects on all pipes of @stream, re-arm blank-data double buffering,
 * and clear the underflow bit only if it was not already set on entry
 * (a pre-existing real underflow is left sticky for later inspection).
 */
838 static void false_optc_underflow_wa(
840 const struct dc_stream_state *stream,
841 struct timing_generator *tg)
846 if (!dc->hwseq->wa.false_optc_underflow)
/* remember whether a (real) underflow was already latched */
849 underflow = tg->funcs->is_optc_underflow_occurred(tg);
851 for (i = 0; i < dc->res_pool->pipe_count; i++) {
852 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
854 if (old_pipe_ctx->stream != stream)
857 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
860 if (tg->funcs->set_blank_data_double_buffer)
861 tg->funcs->set_blank_data_double_buffer(tg, true);
/* clear only the underflow this sequence itself produced */
863 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
864 tg->funcs->clear_optc_underflow(tg);
/*
 * Program pixel clock and OTG timing for the stream on @pipe_ctx and start
 * the CRTC. Only runs on the top (parent) pipe — children share its back
 * end. Returns DC_ERROR_UNEXPECTED if the pixel clock or CRTC enable fails.
 */
867 enum dc_status dcn10_enable_stream_timing(
868 struct pipe_ctx *pipe_ctx,
869 struct dc_state *context,
872 struct dc_stream_state *stream = pipe_ctx->stream;
873 enum dc_color_space color_space;
874 struct tg_color black_color = {0};
876 /* by upper caller loop, pipe0 is parent pipe and be called first.
877 * back end is set up by for pipe0. Other children pipe share back end
878 * with pipe 0. No program is needed.
880 if (pipe_ctx->top_pipe != NULL)
883 /* TODO check if timing_changed, disable stream if timing changed */
885 /* HW program guide assume display already disable
886 * by unplug sequence. OTG assume stop.
888 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
890 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
891 pipe_ctx->clock_source,
892 &pipe_ctx->stream_res.pix_clk_params,
893 &pipe_ctx->pll_settings)) {
895 return DC_ERROR_UNEXPECTED;
898 pipe_ctx->stream_res.tg->funcs->program_timing(
899 pipe_ctx->stream_res.tg,
901 pipe_ctx->pipe_dlg_param.vready_offset,
902 pipe_ctx->pipe_dlg_param.vstartup_start,
903 pipe_ctx->pipe_dlg_param.vupdate_offset,
904 pipe_ctx->pipe_dlg_param.vupdate_width,
905 pipe_ctx->stream->signal,
908 #if 0 /* move to after enable_crtc */
909 /* TODO: OPP FMT, ABM. etc. should be done here. */
910 /* or FPGA now. instance 0 only. TODO: move to opp.c */
912 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
914 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
915 pipe_ctx->stream_res.opp,
916 &stream->bit_depth_params,
919 /* program otg blank color */
920 color_space = stream->output_color_space;
921 color_space_to_black_color(dc, color_space, &black_color);
924 * The way 420 is packed, 2 channels carry Y component, 1 channel
925 * alternate between Cb and Cr, so both channels need the pixel
928 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
929 black_color.color_r_cr = black_color.color_g_y;
931 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
932 pipe_ctx->stream_res.tg->funcs->set_blank_color(
933 pipe_ctx->stream_res.tg,
/* blank the OTG (if not already blanked) before enabling the CRTC */
936 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
937 !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
938 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
939 hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
940 false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
943 /* VTG is within DCHUB command block. DCFCLK is always on */
944 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
946 return DC_ERROR_UNEXPECTED;
949 /* TODO program crtc source select for non-virtual signal*/
950 /* TODO program FMT */
951 /* TODO setup link_enc */
952 /* TODO set stream attributes */
953 /* TODO program audio */
954 /* TODO enable stream if timing changed */
955 /* TODO unblank stream if DP */
/*
 * Tear down the back end owned by @pipe_ctx: disable the link/audio stream,
 * release the audio endpoint, and — on the parent pipe only, since the back
 * end is shared — disable ABM, the CRTC, the OTG clock and DRR. Finally
 * detach the stream pointer from the pipe.
 */
960 static void dcn10_reset_back_end_for_pipe(
962 struct pipe_ctx *pipe_ctx,
963 struct dc_state *context)
966 struct dc_link *link;
967 DC_LOGGER_INIT(dc->ctx->logger);
/* no stream encoder means nothing was enabled; just detach the stream */
968 if (pipe_ctx->stream_res.stream_enc == NULL) {
969 pipe_ctx->stream = NULL;
973 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
974 link = pipe_ctx->stream->link;
975 /* DPMS may already disable or */
976 /* dpms_off status is incorrect due to fastboot
977 * feature. When system resume from S4 with second
978 * screen only, the dpms_off would be true but
979 * VBIOS lit up eDP, so check link status too.
981 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
982 core_link_disable_stream(pipe_ctx);
983 else if (pipe_ctx->stream_res.audio)
984 dc->hwss.disable_audio_stream(pipe_ctx);
986 if (pipe_ctx->stream_res.audio) {
987 /*disable az_endpoint*/
988 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
991 if (dc->caps.dynamic_audio == true) {
992 /*we have to dynamic arbitrate the audio endpoints*/
993 /*we free the resource, need reset is_audio_acquired*/
994 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
995 pipe_ctx->stream_res.audio, false);
996 pipe_ctx->stream_res.audio = NULL;
1001 /* by upper caller loop, parent pipe: pipe0, will be reset last.
1002 * back end share by all pipes and will be disable only when disable
1005 if (pipe_ctx->top_pipe == NULL) {
1007 if (pipe_ctx->stream_res.abm)
1008 dc->hwss.set_abm_immediate_disable(pipe_ctx);
1010 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
1012 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
1013 if (pipe_ctx->stream_res.tg->funcs->set_drr)
1014 pipe_ctx->stream_res.tg->funcs->set_drr(
1015 pipe_ctx->stream_res.tg, NULL);
/* only log for pipes that are part of the current state */
1018 for (i = 0; i < dc->res_pool->pipe_count; i++)
1019 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
1022 if (i == dc->res_pool->pipe_count)
1025 pipe_ctx->stream = NULL;
1026 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
1027 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
/*
 * Forced HUBP/HUBBUB recovery sequence used after a detected underflow:
 * blank all HUBPs, soft-reset HUBBUB, pulse HUBP_DISABLE, release the soft
 * reset, then unblank. Gated on dc->debug.recovery_enabled. The intended
 * register sequence is spelled out in the comment block below.
 */
1030 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1034 bool need_recover = true;
1036 if (!dc->debug.recovery_enabled)
/* NOTE(review): need_recover starts as true, so the scan below only
 * re-confirms it — recovery proceeds even if no pipe reports underflow.
 * Confirm whether the initializer was meant to be false.
 */
1039 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1040 struct pipe_ctx *pipe_ctx =
1041 &dc->current_state->res_ctx.pipe_ctx[i];
1042 if (pipe_ctx != NULL) {
1043 hubp = pipe_ctx->plane_res.hubp;
1044 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1045 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1046 /* one pipe underflow, we will reset all the pipes*/
1047 need_recover = true;
1055 DCHUBP_CNTL:HUBP_BLANK_EN=1
1056 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1057 DCHUBP_CNTL:HUBP_DISABLE=1
1058 DCHUBP_CNTL:HUBP_DISABLE=0
1059 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1060 DCSURF_PRIMARY_SURFACE_ADDRESS
1061 DCHUBP_CNTL:HUBP_BLANK_EN=0
1064 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1065 struct pipe_ctx *pipe_ctx =
1066 &dc->current_state->res_ctx.pipe_ctx[i];
1067 if (pipe_ctx != NULL) {
1068 hubp = pipe_ctx->plane_res.hubp;
1069 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1070 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1071 hubp->funcs->set_hubp_blank_en(hubp, true);
1074 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1075 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1077 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1078 struct pipe_ctx *pipe_ctx =
1079 &dc->current_state->res_ctx.pipe_ctx[i];
1080 if (pipe_ctx != NULL) {
1081 hubp = pipe_ctx->plane_res.hubp;
1082 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1083 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1084 hubp->funcs->hubp_disable_control(hubp, true);
1087 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1088 struct pipe_ctx *pipe_ctx =
1089 &dc->current_state->res_ctx.pipe_ctx[i];
1090 if (pipe_ctx != NULL) {
1091 hubp = pipe_ctx->plane_res.hubp;
1092 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
/* NOTE(review): the sequence comment says HUBP_DISABLE=0 but the call
 * passes true, same as the previous loop — verify against
 * hubp_disable_control() semantics before changing.
 */
1093 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1094 hubp->funcs->hubp_disable_control(hubp, true);
1097 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1098 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1099 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1100 struct pipe_ctx *pipe_ctx =
1101 &dc->current_state->res_ctx.pipe_ctx[i];
1102 if (pipe_ctx != NULL) {
1103 hubp = pipe_ctx->plane_res.hubp;
1104 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
/* NOTE(review): the sequence comment says HUBP_BLANK_EN=0 (unblank) but
 * the call passes true (blank) — confirm intended argument.
 */
1105 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1106 hubp->funcs->set_hubp_blank_en(hubp, true);
/*
 * Debug sanity check: verify that HUBBUB still reports the
 * "allow p-state change" signal high. On failure, optionally dump HW
 * state, trap to the debugger, and attempt the underflow force-recovery
 * workaround, re-verifying once afterwards.
 */
1113 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1115 struct hubbub *hubbub = dc->res_pool->hubbub;
/* Flip in a debugger to enable the full HW state dump on failure. */
1116 static bool should_log_hw_state; /* prevent hw state log by default */
/* Nothing to do if this HUBBUB implementation lacks the hook. */
1118 if (!hubbub->funcs->verify_allow_pstate_change_high)
1121 if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
1124 if (should_log_hw_state)
1125 dcn10_log_hw_state(dc, NULL);
1127 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1128 BREAK_TO_DEBUGGER();
/* Try the HW recovery workaround, then check the signal once more. */
1129 if (dcn10_hw_wa_force_recovery(dc)) {
1131 if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
1132 BREAK_TO_DEBUGGER();
1137 /* trigger HW to start disconnect plane from stream on the next vsync */
1138 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1140 struct dce_hwseq *hws = dc->hwseq;
1141 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1142 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1143 struct mpc *mpc = dc->res_pool->mpc;
1144 struct mpc_tree *mpc_tree_params;
1145 struct mpcc *mpcc_to_remove = NULL;
1146 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
/* Find the MPCC feeding this DPP in the OPP's blending tree. */
1148 mpc_tree_params = &(opp->mpc_tree_params);
1149 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
/* MPCC may already be gone (e.g. removed by a prior disconnect). */
1152 if (mpcc_to_remove == NULL)
1155 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
/* Removal takes effect at the next vsync; record that it is pending. */
1157 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1159 dc->optimized_required = true;
1161 if (hubp->funcs->hubp_disconnect)
1162 hubp->funcs->hubp_disconnect(hubp);
1164 if (dc->debug.sanity_checks)
1165 hws->funcs.verify_allow_pstate_change_high(dc);
1169 * dcn10_plane_atomic_power_down - Power down plane components.
1171 * @dc: dc struct reference. used for grab hwseq.
1172 * @dpp: dpp struct reference.
1173 * @hubp: hubp struct reference.
1175 * Keep in mind that this operation requires a power gate configuration;
1176 * however, requests for switch power gate are precisely controlled to avoid
1177 * problems. For this reason, power gate request is usually disabled. This
1178 * function first needs to enable the power gate request before disabling DPP
1179 * and HUBP. Finally, it disables the power gate request again.
1181 void dcn10_plane_atomic_power_down(struct dc *dc,
1185 struct dce_hwseq *hws = dc->hwseq;
1186 DC_LOGGER_INIT(dc->ctx->logger);
/* DC_IP_REQUEST_CNTL gates whether power-gate requests are honored;
 * only present on some ASICs, hence the REG() existence check.
 */
1188 if (REG(DC_IP_REQUEST_CNTL)) {
1189 REG_SET(DC_IP_REQUEST_CNTL, 0,
/* Power-gate DPP, then HUBP, while requests are enabled. */
1192 if (hws->funcs.dpp_pg_control)
1193 hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1195 if (hws->funcs.hubp_pg_control)
1196 hws->funcs.hubp_pg_control(hws, hubp->inst, false);
/* Reset DPP state before closing the power-gate request window. */
1198 dpp->funcs->dpp_reset(dpp);
1199 REG_SET(DC_IP_REQUEST_CNTL, 0,
1202 "Power gated front end %d\n", hubp->inst);
1206 /* disable HW used by plane.
1207 * note: cannot disable until disconnect is complete
1209 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1211 struct dce_hwseq *hws = dc->hwseq;
1212 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1213 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1214 int opp_id = hubp->opp_id;
/* Wait for the pending MPCC disconnect before touching clocks. */
1216 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1218 hubp->funcs->hubp_clk_cntl(hubp, false);
1220 dpp->funcs->dpp_dppclk_control(dpp, false, false);
/* Gate the OPP pipe clock only when no MPCC tree remains attached. */
1222 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1223 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1224 pipe_ctx->stream_res.opp,
1227 hubp->power_gated = true;
1228 dc->optimized_required = false; /* We're powering off, no need to optimize */
1230 hws->funcs.plane_atomic_power_down(dc,
1231 pipe_ctx->plane_res.dpp,
1232 pipe_ctx->plane_res.hubp);
/* Clear all bookkeeping: this pipe no longer drives anything. */
1234 pipe_ctx->stream = NULL;
1235 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1236 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1237 pipe_ctx->top_pipe = NULL;
1238 pipe_ctx->bottom_pipe = NULL;
1239 pipe_ctx->plane_state = NULL;
/* Disable the front end for a pipe, skipping pipes already gated. */
1242 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1244 struct dce_hwseq *hws = dc->hwseq;
1245 DC_LOGGER_INIT(dc->ctx->logger);
/* No HUBP allocated, or already power gated: nothing to do. */
1247 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1250 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
/* Apply the DEGVIDCN10-253 workaround after disabling the plane. */
1252 apply_DEGVIDCN10_253_wa(dc);
1254 DC_LOG_DC("Power down front end %d\n",
1255 pipe_ctx->pipe_idx);
/*
 * dcn10_init_pipes - Bring all display pipes to a known-off state.
 *
 * Blanks every enabled OTG, resets each MPC mux, then disconnects and
 * disables every front-end pipe — except pipes kept alive for seamless
 * boot. Finally power-gates any DSC instance not feeding the running OTG.
 */
1258 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1261 struct dce_hwseq *hws = dc->hwseq;
1262 bool can_apply_seamless_boot = false;
/* Seamless boot requested by any stream in the incoming context? */
1264 for (i = 0; i < context->stream_count; i++) {
1265 if (context->streams[i]->apply_seamless_boot_optimization) {
1266 can_apply_seamless_boot = true;
1271 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1272 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1273 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1275 /* There is assumption that pipe_ctx is not mapping irregularly
1276 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1277 * we will use the pipe, so don't disable
1279 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1282 /* Blank controller using driver code instead of
1285 if (tg->funcs->is_tg_enabled(tg)) {
1286 if (hws->funcs.init_blank != NULL) {
1287 hws->funcs.init_blank(dc, tg);
1288 tg->funcs->lock(tg);
1290 tg->funcs->lock(tg);
1291 tg->funcs->set_blank(tg, true);
1292 hwss_wait_for_blank_complete(tg);
1297 /* num_opp will be equal to number of mpcc */
1298 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1299 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1301 /* Cannot reset the MPC mux if seamless boot */
1302 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1305 dc->res_pool->mpc->funcs->mpc_init_single_inst(
1306 dc->res_pool->mpc, i);
/* Per-pipe teardown: disconnect MPCC, disable HUBP/DPP, reset TG. */
1309 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1310 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1311 struct hubp *hubp = dc->res_pool->hubps[i];
1312 struct dpp *dpp = dc->res_pool->dpps[i];
1313 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1315 /* There is assumption that pipe_ctx is not mapping irregularly
1316 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1317 * we will use the pipe, so don't disable
1319 if (can_apply_seamless_boot &&
1320 pipe_ctx->stream != NULL &&
1321 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1322 pipe_ctx->stream_res.tg)) {
1323 // Enable double buffering for OTG_BLANK no matter if
1324 // seamless boot is enabled or not to suppress global sync
1325 // signals when OTG blanked. This is to prevent pipe from
1326 // requesting data while in PSR.
1327 tg->funcs->tg_init(tg);
1328 hubp->power_gated = true;
1332 /* Disable on the current state so the new one isn't cleared. */
1333 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1335 dpp->funcs->dpp_reset(dpp);
/* Populate a minimal pipe_ctx so the disconnect/disable helpers work. */
1337 pipe_ctx->stream_res.tg = tg;
1338 pipe_ctx->pipe_idx = i;
1340 pipe_ctx->plane_res.hubp = hubp;
1341 pipe_ctx->plane_res.dpp = dpp;
1342 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1343 hubp->mpcc_id = dpp->inst;
1344 hubp->opp_id = OPP_ID_INVALID;
1345 hubp->power_gated = false;
1347 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1348 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1349 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1350 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1352 hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1354 if (tg->funcs->is_tg_enabled(tg))
1355 tg->funcs->unlock(tg);
1357 dc->hwss.disable_plane(dc, pipe_ctx);
1359 pipe_ctx->stream_res.tg = NULL;
1360 pipe_ctx->plane_res.hubp = NULL;
1362 tg->funcs->tg_init(tg);
1365 /* Power gate DSCs */
1366 if (hws->funcs.dsc_pg_control != NULL) {
1367 uint32_t num_opps = 0;
1368 uint32_t opp_id_src0 = OPP_ID_INVALID;
1369 uint32_t opp_id_src1 = OPP_ID_INVALID;
1371 // Step 1: To find out which OPTC is running & OPTC DSC is ON
1372 // We can't use res_pool->res_cap->num_timing_generator to check
1373 // Because it records display pipes default setting built in driver,
1374 // not display pipes of the current chip.
1375 // Some ASICs would be fused display pipes less than the default setting.
1376 // In dcnxx_resource_construct function, driver would obatin real information.
1377 for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1378 uint32_t optc_dsc_state = 0;
1379 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1381 if (tg->funcs->is_tg_enabled(tg)) {
1382 if (tg->funcs->get_dsc_status)
1383 tg->funcs->get_dsc_status(tg, &optc_dsc_state);
1384 // Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
1385 // non-zero value is DSC enabled
1386 if (optc_dsc_state != 0) {
1387 tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
1393 // Step 2: To power down DSC but skip DSC of running OPTC
1394 for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1395 struct dcn_dsc_state s = {0};
1397 dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
/* Skip a DSC that is live and feeding the running OPTC's OPP(s). */
1399 if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
1400 s.dsc_clock_en && s.dsc_fw_en)
1403 hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
/*
 * dcn10_init_hw - One-time hardware initialization at driver load/resume.
 *
 * Initializes clocks and DCCG, runs BIOS golden init, brings up link
 * encoders, tears down pipes (unless deferred for seamless boot), then
 * initializes audio, panel backlight/ABM/DMCU and the clock/power gating
 * policy.
 */
1408 void dcn10_init_hw(struct dc *dc)
1411 struct abm *abm = dc->res_pool->abm;
1412 struct dmcu *dmcu = dc->res_pool->dmcu;
1413 struct dce_hwseq *hws = dc->hwseq;
1414 struct dc_bios *dcb = dc->ctx->dc_bios;
1415 struct resource_pool *res_pool = dc->res_pool;
1416 uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1417 bool is_optimized_init_done = false;
1419 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1420 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1422 /* Align bw context with hw config when system resume. */
1423 if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
1424 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
1425 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
1428 // Initialize the dccg
1429 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1430 dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
/* FPGA (Maximus) environment: minimal init, no VBIOS services. */
1432 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1434 REG_WRITE(REFCLK_CNTL, 0);
1435 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1436 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1438 if (!dc->debug.disable_clock_gate) {
1439 /* enable all DCN clock gating */
1440 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1442 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1444 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1447 //Enable ability to power gate / don't force power on permanently
1448 if (hws->funcs.enable_power_gating_plane)
1449 hws->funcs.enable_power_gating_plane(hws, true);
/* Real hardware path: VGA off (unless accelerated) + golden init. */
1454 if (!dcb->funcs->is_accelerated_mode(dcb))
1455 hws->funcs.disable_vga(dc->hwseq);
1457 hws->funcs.bios_golden_init(dc);
/* Derive reference clocks from the VBIOS crystal frequency. */
1459 if (dc->ctx->dc_bios->fw_info_valid) {
1460 res_pool->ref_clocks.xtalin_clock_inKhz =
1461 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1463 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1464 if (res_pool->dccg && res_pool->hubbub) {
1466 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1467 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1468 &res_pool->ref_clocks.dccg_ref_clock_inKhz);
1470 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1471 res_pool->ref_clocks.dccg_ref_clock_inKhz,
1472 &res_pool->ref_clocks.dchub_ref_clock_inKhz);
1474 // Not all ASICs have DCCG sw component
1475 res_pool->ref_clocks.dccg_ref_clock_inKhz =
1476 res_pool->ref_clocks.xtalin_clock_inKhz;
1477 res_pool->ref_clocks.dchub_ref_clock_inKhz =
1478 res_pool->ref_clocks.xtalin_clock_inKhz;
1482 ASSERT_CRITICAL(false);
1484 for (i = 0; i < dc->link_count; i++) {
1485 /* Power up AND update implementation according to the
1486 * required signal (which may be different from the
1487 * default signal on connector).
1489 struct dc_link *link = dc->links[i];
1491 if (!is_optimized_init_done)
1492 link->link_enc->funcs->hw_init(link->link_enc);
1494 /* Check for enabled DIG to identify enabled display */
1495 if (link->link_enc->funcs->is_dig_enabled &&
1496 link->link_enc->funcs->is_dig_enabled(link->link_enc))
1497 link->link_status.link_active = true;
1500 /* Power gate DSCs */
1501 if (!is_optimized_init_done) {
1502 for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1503 if (hws->funcs.dsc_pg_control != NULL)
1504 hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1507 /* we want to turn off all dp displays before doing detection */
1508 dc_link_blank_all_dp_displays(dc);
1510 /* If taking control over from VBIOS, we may want to optimize our first
1511 * mode set, so we need to skip powering down pipes until we know which
1512 * pipes we want to use.
1513 * Otherwise, if taking control is not possible, we need to power
1516 if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
1517 if (!is_optimized_init_done) {
1518 hws->funcs.init_pipes(dc, dc->current_state);
1519 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1520 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1521 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
;
1525 if (!is_optimized_init_done) {
1527 for (i = 0; i < res_pool->audio_count; i++) {
1528 struct audio *audio = res_pool->audios[i];
1530 audio->funcs->hw_init(audio);
1533 for (i = 0; i < dc->link_count; i++) {
1534 struct dc_link *link = dc->links[i];
/* Panel control HW init also reports the current backlight level. */
1536 if (link->panel_cntl)
1537 backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1541 abm->funcs->abm_init(abm, backlight);
1543 if (dmcu != NULL && !dmcu->auto_load_dmcu)
1544 dmcu->funcs->dmcu_init(dmcu);
1547 if (abm != NULL && dmcu != NULL)
1548 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1550 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1551 if (!is_optimized_init_done)
1552 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1554 if (!dc->debug.disable_clock_gate) {
1555 /* enable all DCN clock gating */
1556 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1558 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1560 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1562 if (hws->funcs.enable_power_gating_plane)
1563 hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1565 if (dc->clk_mgr->funcs->notify_wm_ranges)
1566 dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1569 /* In headless boot cases, DIG may be turned
1570 * on which causes HW/SW discrepancies.
1571 * To avoid this, power down hardware on boot
1572 * if DIG is turned on
1574 void dcn10_power_down_on_boot(struct dc *dc)
1576 struct dc_link *edp_links[MAX_NUM_EDP];
1577 struct dc_link *edp_link = NULL;
1581 get_edp_links(dc, edp_links, &edp_num);
1583 edp_link = edp_links[0];
/* eDP path: backlight off first, then full power down, then panel power. */
1585 if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1586 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1587 dc->hwseq->funcs.edp_backlight_control &&
1588 dc->hwss.power_down &&
1589 dc->hwss.edp_power_control) {
1590 dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1591 dc->hwss.power_down(dc);
1592 dc->hwss.edp_power_control(edp_link, false);
/* Non-eDP links: power down when any DIG is found enabled. */
1594 for (i = 0; i < dc->link_count; i++) {
1595 struct dc_link *link = dc->links[i];
1597 if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1598 link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1599 dc->hwss.power_down) {
1600 dc->hwss.power_down(dc);
1608 * Call update_clocks with empty context
1609 * to send DISPLAY_OFF
1610 * Otherwise DISPLAY_OFF may not be asserted
1612 if (dc->clk_mgr->funcs->set_low_power_state)
1613 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
/*
 * Reset back ends whose pipe is gone or needs reprogramming in the new
 * context. Iterates pipes in reverse so bottom pipes are handled before
 * their top pipes; secondary (top_pipe != NULL) pipes are skipped.
 */
1616 void dcn10_reset_hw_ctx_wrap(
1618 struct dc_state *context)
1621 struct dce_hwseq *hws = dc->hwseq;
1624 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1625 struct pipe_ctx *pipe_ctx_old =
1626 &dc->current_state->res_ctx.pipe_ctx[i];
1627 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1629 if (!pipe_ctx_old->stream)
1632 if (pipe_ctx_old->top_pipe)
/* Reset if the pipe lost its stream or its programming must change. */
1635 if (!pipe_ctx->stream ||
1636 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1637 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1639 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1640 if (hws->funcs.enable_stream_gating)
1641 hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1643 old_clk->funcs->cs_power_down(old_clk);
/*
 * For side-by-side / top-and-bottom stereo, the secondary split pipe must
 * scan out the right-eye surface: temporarily swap the right address into
 * left_addr, saving the original left address in *addr so the caller can
 * restore it. Also promotes the address to GRPH_STEREO type (mirroring
 * left into right) when a 3D view format is used with a non-stereo address.
 */
1648 static bool patch_address_for_sbs_tb_stereo(
1649 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1651 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
/* Secondary pipe of a split: shares plane_state with its top pipe. */
1652 bool sec_split = pipe_ctx->top_pipe &&
1653 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1654 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1655 (pipe_ctx->stream->timing.timing_3d_format ==
1656 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1657 pipe_ctx->stream->timing.timing_3d_format ==
1658 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
/* Save original left address; caller restores it after programming. */
1659 *addr = plane_state->address.grph_stereo.left_addr;
1660 plane_state->address.grph_stereo.left_addr =
1661 plane_state->address.grph_stereo.right_addr;
1664 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1665 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1666 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1667 plane_state->address.grph_stereo.right_addr =
1668 plane_state->address.grph_stereo.left_addr;
1669 plane_state->address.grph_stereo.right_meta_addr =
1670 plane_state->address.grph_stereo.left_meta_addr;
/*
 * Program the plane's surface address into the HUBP (flip), updating the
 * plane status bookkeeping. Stereo addresses may be patched for the
 * secondary split pipe and are restored afterwards.
 */
1676 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1678 bool addr_patched = false;
1679 PHYSICAL_ADDRESS_LOC addr;
1680 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1682 if (plane_state == NULL)
1685 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1687 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1688 pipe_ctx->plane_res.hubp,
1689 &plane_state->address,
1690 plane_state->flip_immediate);
1692 plane_state->status.requested_address = plane_state->address;
/* Immediate flips take effect right away; reflect that in status. */
1694 if (plane_state->flip_immediate)
1695 plane_state->status.current_address = plane_state->address;
/* Restore the left-eye address saved by the stereo patch above. */
1698 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
/*
 * Program the DPP input (degamma) transfer function for a plane: an input
 * LUT when a usable gamma correction is supplied, then the degamma mode
 * selected from the plane's input transfer function (predefined HW curves,
 * a user PWL for PQ/custom curves, or bypass).
 */
1701 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1702 const struct dc_plane_state *plane_state)
1704 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1705 const struct dc_transfer_func *tf = NULL;
1708 if (dpp_base == NULL)
1711 if (plane_state->in_transfer_func)
1712 tf = plane_state->in_transfer_func;
/* Program the input LUT only when gamma is present, non-identity,
 * allowed by debug policy and supported for this surface format.
 */
1714 if (plane_state->gamma_correction &&
1715 !dpp_base->ctx->dc->debug.always_use_regamma
1716 && !plane_state->gamma_correction->is_identity
1717 && dce_use_lut(plane_state->format))
1718 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1721 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1722 else if (tf->type == TF_TYPE_PREDEFINED) {
1724 case TRANSFER_FUNCTION_SRGB:
1725 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1727 case TRANSFER_FUNCTION_BT709:
1728 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1730 case TRANSFER_FUNCTION_LINEAR:
1731 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
/* PQ has no fixed-function HW curve: translate to a user PWL. */
1733 case TRANSFER_FUNCTION_PQ:
1734 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1735 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1736 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1743 } else if (tf->type == TF_TYPE_BYPASS) {
1744 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
/* Arbitrary curve: convert to HW PWL format and program it. */
1746 cm_helper_translate_curve_to_degamma_hw_format(tf,
1747 &dpp_base->degamma_params);
1748 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1749 &dpp_base->degamma_params);
1756 #define MAX_NUM_HW_POINTS 0x200
/*
 * Log the points of a transfer function. Hardware points go to the GAMMA
 * log; the remaining (non-hw) points and the G/B channels only appear
 * under the more verbose ALL_GAMMA / ALL_TF_CHANNELS log categories.
 */
1758 static void log_tf(struct dc_context *ctx,
1759 struct dc_transfer_func *tf, uint32_t hw_points_num)
1761 // DC_LOG_GAMMA is default logging of all hw points
1762 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1763 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1766 DC_LOGGER_INIT(ctx->logger);
1767 DC_LOG_GAMMA("Gamma Correction TF");
1768 DC_LOG_ALL_GAMMA("Logging all tf points...");
1769 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1771 for (i = 0; i < hw_points_num; i++) {
1772 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1773 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1774 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/* Points beyond the HW range are only logged at the verbose level. */
1777 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1778 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1779 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1780 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/*
 * Program the DPP output (regamma) transfer function for a stream: use the
 * fixed-function sRGB curve when possible, otherwise translate the curve
 * to a HW PWL, falling back to bypass on translation failure.
 */
1784 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1785 const struct dc_stream_state *stream)
1787 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1792 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
/* Predefined sRGB maps onto the fixed-function HW curve directly. */
1794 if (stream->out_transfer_func &&
1795 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1796 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1797 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1799 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1802 else if (cm_helper_translate_curve_to_hw_format(
1803 stream->out_transfer_func,
1804 &dpp->regamma_params, false)) {
1805 dpp->funcs->dpp_program_regamma_pwl(
1807 &dpp->regamma_params, OPP_REGAMMA_USER);
/* Translation failed or no transfer func: bypass regamma. */
1809 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1811 if (stream != NULL && stream->ctx != NULL &&
1812 stream->out_transfer_func != NULL) {
1814 stream->out_transfer_func,
1815 dpp->regamma_params.hw_points_num);
/*
 * Lock/unlock pipe programming via the TG master update lock. Secondary
 * (bottom) pipes are covered by their top pipe's lock, so only the top
 * pipe of a tree is acted on.
 */
1821 void dcn10_pipe_control_lock(
1823 struct pipe_ctx *pipe,
1826 struct dce_hwseq *hws = dc->hwseq;
1828 /* use TG master update lock to lock everything on the TG
1829 * therefore only top pipe need to lock
1831 if (!pipe || pipe->top_pipe)
1834 if (dc->debug.sanity_checks)
1835 hws->funcs.verify_allow_pstate_change_high(dc);
1838 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1840 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1842 if (dc->debug.sanity_checks)
1843 hws->funcs.verify_allow_pstate_change_high(dc);
1847 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1849 * Software keepout workaround to prevent cursor update locking from stalling
1850 * out cursor updates indefinitely or from old values from being retained in
1851 * the case where the viewport changes in the same frame as the cursor.
1853 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1854 * too close to VUPDATE, then stall out until VUPDATE finishes.
1856 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1857 * to avoid the need for this workaround.
1859 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1861 struct dc_stream_state *stream = pipe_ctx->stream;
1862 struct crtc_position position;
1863 uint32_t vupdate_start, vupdate_end;
1864 unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1865 unsigned int us_per_line, us_vupdate;
/* Both hooks are required to compute the VUPDATE window. */
1867 if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1870 if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1873 dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1876 dc->hwss.get_position(&pipe_ctx, 1, &position);
1877 vpos = position.vertical_count;
1879 /* Avoid wraparound calculation issues */
1880 vupdate_start += stream->timing.v_total;
1881 vupdate_end += stream->timing.v_total;
1882 vpos += stream->timing.v_total;
1884 if (vpos <= vupdate_start) {
1885 /* VPOS is in VACTIVE or back porch. */
1886 lines_to_vupdate = vupdate_start - vpos;
1887 } else if (vpos > vupdate_end) {
1888 /* VPOS is in the front porch. */
1891 /* VPOS is in VUPDATE. */
1892 lines_to_vupdate = 0;
1895 /* Calculate time until VUPDATE in microseconds. */
1897 stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1898 us_to_vupdate = lines_to_vupdate * us_per_line;
1900 /* 70 us is a conservative estimate of cursor update time*/
1901 if (us_to_vupdate > 70)
1904 /* Stall out until the cursor update completes. */
1905 if (vupdate_end < vupdate_start)
1906 vupdate_end += stream->timing.v_total;
1907 us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1908 udelay(us_to_vupdate + us_vupdate);
/*
 * Lock/unlock cursor programming. Uses the DMUB HW lock manager when the
 * link supports it, otherwise the MPC cursor lock. Before locking, stalls
 * past VUPDATE if needed so the lock cannot starve cursor updates.
 */
1911 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1913 /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1914 if (!pipe || pipe->top_pipe)
1917 /* Prevent cursor lock from stalling out cursor updates. */
1919 delay_cursor_until_vupdate(dc, pipe);
1921 if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1922 union dmub_hw_lock_flags hw_locks = { 0 };
1923 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1925 hw_locks.bits.lock_cursor = 1;
1926 inst_flags.opp_inst = pipe->stream_res.opp->inst;
1928 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
/* Fallback: per-OPP cursor lock in the MPC. */
1933 dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1934 pipe->stream_res.opp->inst, lock);
/*
 * Poll the TG for a triggered reset, waiting at most a fixed number of
 * frames. Returns whether the reset was observed; logs an error when the
 * TG counter is stuck or the wait times out.
 */
1937 static bool wait_for_reset_trigger_to_occur(
1938 struct dc_context *dc_ctx,
1939 struct timing_generator *tg)
1943 /* To avoid endless loop we wait at most
1944 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1945 const uint32_t frames_to_wait_on_triggered_reset = 10;
1948 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
/* If the counter is frozen, waiting for frames is pointless. */
1950 if (!tg->funcs->is_counter_moving(tg)) {
1951 DC_ERROR("TG counter is not moving!\n");
1955 if (tg->funcs->did_triggered_reset_occur(tg)) {
1957 /* usually occurs at i=1 */
1958 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1963 /* Wait for one frame. */
1964 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1965 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1969 DC_ERROR("GSL: Timeout on reset trigger!\n");
/*
 * Reduce a numerator/denominator pair in place by dividing out common
 * prime factors (primes up to 997). When checkUint32Bounary is set, the
 * reduction succeeds once both values fit in 32 bits.
 * NOTE(review): "Bounary" in the parameter name is a typo for "Boundary";
 * kept as-is since it is an identifier.
 */
1974 static uint64_t reduceSizeAndFraction(uint64_t *numerator,
1975 uint64_t *denominator,
1976 bool checkUint32Bounary)
/* Without a 32-bit bound to meet, the reduction is "successful" upfront. */
1979 bool ret = checkUint32Bounary == false;
1980 uint64_t max_int32 = 0xffffffff;
1981 uint64_t num, denom;
1982 static const uint16_t prime_numbers[] = {
1983 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
1984 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
1985 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
1986 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
1987 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
1988 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
1989 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
1990 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
1991 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
1992 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
1993 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
1994 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
1995 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
1996 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
1997 941, 947, 953, 967, 971, 977, 983, 991, 997};
1998 int count = ARRAY_SIZE(prime_numbers);
2001 denom = *denominator;
2002 for (i = 0; i < count; i++) {
2003 uint32_t num_remainder, denom_remainder;
2004 uint64_t num_result, denom_result;
/* Both values fit in 32 bits: bound satisfied, stop early. */
2005 if (checkUint32Bounary &&
2006 num <= max_int32 && denom <= max_int32) {
/* Divide out the current prime repeatedly while it divides both. */
2011 num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
2012 denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
2013 if (num_remainder == 0 && denom_remainder == 0) {
2015 denom = denom_result;
2017 } while (num_remainder == 0 && denom_remainder == 0);
2020 *denominator = denom;
2024 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2026 uint32_t master_pipe_refresh_rate =
2027 pipe->stream->timing.pix_clk_100hz * 100 /
2028 pipe->stream->timing.h_total /
2029 pipe->stream->timing.v_total;
2030 return master_pipe_refresh_rate <= 30;
2033 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2034 bool account_low_refresh_rate)
2036 uint32_t clock_divider = 1;
2037 uint32_t numpipes = 1;
2039 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2042 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2045 while (pipe->next_odm_pipe) {
2046 pipe = pipe->next_odm_pipe;
2049 clock_divider *= numpipes;
2051 return clock_divider;
/*
 * Align DP DTO pixel clocks of a synchronization group to the embedded
 * (eDP/LVDS-class) display's clock so their vblanks can be kept in phase.
 * Computes a phase/modulo DTO ratio per pipe from the embedded panel's
 * clock and each pipe's HW timing, reduces the fractions, and programs
 * the overridden DP pixel clocks. Pipes whose ratio cannot be reduced are
 * marked has_non_synchronizable_pclk and skipped.
 */
2054 static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
2055 struct pipe_ctx *grouped_pipes[])
2057 struct dc_context *dc_ctx = dc->ctx;
2058 int i, master = -1, embedded = -1;
2059 struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
2060 uint64_t phase[MAX_PIPES];
2061 uint64_t modulo[MAX_PIPES];
2064 uint32_t embedded_pix_clk_100hz;
2065 uint16_t embedded_h_total;
2066 uint16_t embedded_v_total;
2067 uint32_t dp_ref_clk_100hz =
2068 dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
/* Only applicable when DTO override params and the override hook exist. */
2070 if (dc->config.vblank_alignment_dto_params &&
2071 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
/* Unpack embedded panel h_total/v_total/pix_clk from the packed u64. */
2073 (dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2075 (dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2076 embedded_pix_clk_100hz =
2077 dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
2079 for (i = 0; i < group_size; i++) {
2080 grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2081 grouped_pipes[i]->stream_res.tg,
2082 &hw_crtc_timing[i]);
2083 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2084 dc->res_pool->dp_clock_source,
2085 grouped_pipes[i]->stream_res.tg->inst,
2087 hw_crtc_timing[i].pix_clk_100hz = pclk;
2088 if (dc_is_embedded_signal(
2089 grouped_pipes[i]->stream->signal)) {
/* Embedded display: DTO ratio is its own clock over the DP refclk. */
2092 phase[i] = embedded_pix_clk_100hz*100;
2093 modulo[i] = dp_ref_clk_100hz*100;
/* Others: scale by frame size ratio and the pipe's clock divider. */
2096 phase[i] = (uint64_t)embedded_pix_clk_100hz*
2097 hw_crtc_timing[i].h_total*
2098 hw_crtc_timing[i].v_total;
2099 phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2100 modulo[i] = (uint64_t)dp_ref_clk_100hz*
2104 if (reduceSizeAndFraction(&phase[i],
2105 &modulo[i], true) == false) {
2107 * this will help to stop reporting
2108 * this timing synchronizable
2110 DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2111 grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
/* Program overridden DP pixel clocks for all synchronizable pipes. */
2116 for (i = 0; i < group_size; i++) {
2117 if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2118 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2119 dc->res_pool->dp_clock_source,
2120 grouped_pipes[i]->stream_res.tg->inst,
2121 phase[i], modulo[i]);
2122 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2123 dc->res_pool->dp_clock_source,
2124 grouped_pipes[i]->stream_res.tg->inst, &pclk);
2125 grouped_pipes[i]->stream->timing.pix_clk_100hz =
2126 pclk*get_clock_divider(grouped_pipes[i], false);
/*
 * Synchronize vblanks of a group of pipes: temporarily stretch the DPG
 * dimensions of the slave pipes, align the DP DTO pixel clocks to the
 * embedded display, align each slave TG's vblank to the master, then
 * restore the DPG dimensions.
 */
2136 void dcn10_enable_vblanks_synchronization(
2140 struct pipe_ctx *grouped_pipes[])
2142 struct dc_context *dc_ctx = dc->ctx;
2143 struct output_pixel_processor *opp;
2144 struct timing_generator *tg;
2145 int i, width, height, master;
/* Temporarily double (+1) the DPG height on slave pipes during sync. */
2147 for (i = 1; i < group_size; i++) {
2148 opp = grouped_pipes[i]->stream_res.opp;
2149 tg = grouped_pipes[i]->stream_res.tg;
2150 tg->funcs->get_otg_active_size(tg, &width, &height);
2151 if (opp->funcs->opp_program_dpg_dimensions)
2152 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Reset per-stream sync state before attempting alignment. */
2155 for (i = 0; i < group_size; i++) {
2156 if (grouped_pipes[i]->stream == NULL)
2158 grouped_pipes[i]->stream->vblank_synchronized = false;
2159 grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2162 DC_SYNC_INFO("Aligning DP DTOs\n");
2164 master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2166 DC_SYNC_INFO("Synchronizing VBlanks\n");
/* Align every synchronizable slave TG's vblank to the master TG. */
2169 for (i = 0; i < group_size; i++) {
2170 if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2171 grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2172 grouped_pipes[master]->stream_res.tg,
2173 grouped_pipes[i]->stream_res.tg,
2174 grouped_pipes[master]->stream->timing.pix_clk_100hz,
2175 grouped_pipes[i]->stream->timing.pix_clk_100hz,
2176 get_clock_divider(grouped_pipes[master], false),
2177 get_clock_divider(grouped_pipes[i], false));
2178 grouped_pipes[i]->stream->vblank_synchronized = true;
2180 grouped_pipes[master]->stream->vblank_synchronized = true;
2181 DC_SYNC_INFO("Sync complete\n");
/* Restore the original DPG dimensions on the slave pipes. */
2184 for (i = 1; i < group_size; i++) {
2185 opp = grouped_pipes[i]->stream_res.opp;
2186 tg = grouped_pipes[i]->stream_res.tg;
2187 tg->funcs->get_otg_active_size(tg, &width, &height);
2188 if (opp->funcs->opp_program_dpg_dimensions)
2189 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
/*
 * Synchronize OTG timing across a pipe group using the hardware reset
 * trigger: all slaves (index >= 1) arm a reset trigger sourced from pipe 0's
 * OTG, we wait for the trigger to fire once, then disarm.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2193 void dcn10_enable_timing_synchronization(
2197 struct pipe_ctx *grouped_pipes[])
2199 struct dc_context *dc_ctx = dc->ctx;
2200 struct output_pixel_processor *opp;
2201 struct timing_generator *tg;
2202 int i, width, height;
2204 DC_SYNC_INFO("Setting up OTG reset trigger\n");
/* Temporarily enlarge DPG dimensions during sync (restored below). */
2206 for (i = 1; i < group_size; i++) {
2207 opp = grouped_pipes[i]->stream_res.opp;
2208 tg = grouped_pipes[i]->stream_res.tg;
2209 tg->funcs->get_otg_active_size(tg, &width, &height);
2210 if (opp->funcs->opp_program_dpg_dimensions)
2211 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2214 for (i = 0; i < group_size; i++) {
2215 if (grouped_pipes[i]->stream == NULL)
2217 grouped_pipes[i]->stream->vblank_synchronized = false;
/* Arm every slave's reset trigger against the group leader's OTG. */
2220 for (i = 1; i < group_size; i++)
2221 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2222 grouped_pipes[i]->stream_res.tg,
2223 grouped_pipes[0]->stream_res.tg->inst);
2225 DC_SYNC_INFO("Waiting for trigger\n");
2227 /* Need to get only check 1 pipe for having reset as all the others are
2228 * synchronized. Look at last pipe programmed to reset.
2231 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2232 for (i = 1; i < group_size; i++)
2233 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2234 grouped_pipes[i]->stream_res.tg);
/* Restore DPG dimensions to the actual OTG active size. */
2236 for (i = 1; i < group_size; i++) {
2237 opp = grouped_pipes[i]->stream_res.opp;
2238 tg = grouped_pipes[i]->stream_res.tg;
2239 tg->funcs->get_otg_active_size(tg, &width, &height);
2240 if (opp->funcs->opp_program_dpg_dimensions)
2241 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2244 DC_SYNC_INFO("Sync complete\n");
/*
 * Arm a per-frame CRTC position reset on every pipe in the group (where the
 * TG supports it), then wait for each pipe's reset trigger to occur.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2247 void dcn10_enable_per_frame_crtc_position_reset(
2250 struct pipe_ctx *grouped_pipes[])
2252 struct dc_context *dc_ctx = dc->ctx;
2255 DC_SYNC_INFO("Setting up\n");
2256 for (i = 0; i < group_size; i++)
2257 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2258 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2259 grouped_pipes[i]->stream_res.tg,
2261 &grouped_pipes[i]->stream->triggered_crtc_reset);
2263 DC_SYNC_INFO("Waiting for trigger\n");
2265 for (i = 0; i < group_size; i++)
2266 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2268 DC_SYNC_INFO("Multi-display sync is complete\n");
/*
 * Read the MMHUB system aperture registers and convert them into the
 * byte-address form expected by vm_system_aperture_param.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2271 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2272 struct vm_system_aperture_param *apt,
2273 struct dce_hwseq *hws)
2275 PHYSICAL_ADDRESS_LOC physical_page_number;
2276 uint32_t logical_addr_low;
2277 uint32_t logical_addr_high;
2279 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2280 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2281 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2282 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2284 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2285 LOGICAL_ADDR, &logical_addr_low);
2287 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2288 LOGICAL_ADDR, &logical_addr_high);
/* Page number -> byte address (4 KiB pages, hence << 12). The << 18 scaling
 * for the logical low/high addresses follows the register granularity —
 * TODO confirm against the register spec. */
2290 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
2291 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
2292 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
2295 /* Temporary read settings, future will get values from kmd directly */
/*
 * Read VM context 0 page-table registers (base, start, end, fault default)
 * and rebase the PTE base from UMA space into the DCN address space using
 * the SDPIF FB base/offset (see comment at the bottom).
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2296 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2297 struct vm_context0_param *vm0,
2298 struct dce_hwseq *hws)
2300 PHYSICAL_ADDRESS_LOC fb_base;
2301 PHYSICAL_ADDRESS_LOC fb_offset;
2302 uint32_t fb_base_value;
2303 uint32_t fb_offset_value;
2305 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2306 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2308 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2309 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2310 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2311 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2313 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2314 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2315 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2316 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2318 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2319 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2320 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2321 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2323 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2324 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2325 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2326 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2329 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2330 * Therefore we need to do
2331 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2332 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
/* FB base/offset register values are in 16 MiB granularity (hence << 24) —
 * TODO confirm against the register spec. */
2334 fb_base.quad_part = (uint64_t)fb_base_value << 24;
2335 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2336 vm0->pte_base.quad_part += fb_base.quad_part;
2337 vm0->pte_base.quad_part -= fb_offset.quad_part;
/*
 * Program the HUBP's VM settings: read the current system aperture and
 * VM context 0 configuration from MMHUB, then push both into the HUBP.
 */
2341 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2343 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2344 struct vm_system_aperture_param apt = {0};
2345 struct vm_context0_param vm0 = {0};
2347 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2348 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2350 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2351 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
/*
 * Power up and enable a plane's pipe resources: undo the DEGVIDCN10_253
 * workaround, power on the HUBP, enable its DCFCLK and the OPP pipe clock,
 * program VM PTEs when GPU VM is supported, and re-arm the flip interrupt.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2354 static void dcn10_enable_plane(
2356 struct pipe_ctx *pipe_ctx,
2357 struct dc_state *context)
2359 struct dce_hwseq *hws = dc->hwseq;
2361 if (dc->debug.sanity_checks) {
2362 hws->funcs.verify_allow_pstate_change_high(dc);
2365 undo_DEGVIDCN10_253_wa(dc);
2367 power_on_plane(dc->hwseq,
2368 pipe_ctx->plane_res.hubp->inst);
2370 /* enable DCFCLK current DCHUB */
2371 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2373 /* make sure OPP_PIPE_CLOCK_EN = 1 */
2374 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2375 pipe_ctx->stream_res.opp,
2378 if (dc->config.gpu_vm_support)
2379 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2381 if (dc->debug.sanity_checks) {
2382 hws->funcs.verify_allow_pstate_change_high(dc);
/* Only the top pipe of the tree re-arms the flip interrupt. */
2385 if (!pipe_ctx->top_pipe
2386 && pipe_ctx->plane_state
2387 && pipe_ctx->plane_state->flip_int_enabled
2388 && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2389 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
/*
 * Program the DPP gamut remap matrix. The stream-level remap matrix takes
 * precedence over the plane-level one; when neither is enabled the
 * adjustment is left in BYPASS.
 */
2393 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2396 struct dpp_grph_csc_adjustment adjust;
2397 memset(&adjust, 0, sizeof(adjust));
2398 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2401 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2402 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2403 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2404 adjust.temperature_matrix[i] =
2405 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2406 } else if (pipe_ctx->plane_state &&
2407 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2408 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2409 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2410 adjust.temperature_matrix[i] =
2411 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2414 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
/*
 * Return true when the rear-plane MPO brightness fix is needed: this pipe
 * carries a rear plane (layer_index > 0) in an RGB colorspace and the
 * front-most MPO plane (layer_index == 0) is visible above it.
 */
2418 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2420 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2421 if (pipe_ctx->top_pipe) {
2422 struct pipe_ctx *top = pipe_ctx->top_pipe;
2424 while (top->top_pipe)
2425 top = top->top_pipe; // Traverse to top pipe_ctx
2426 if (top->plane_state && top->plane_state->layer_index == 0)
2427 return true; // Front MPO plane not hidden
/*
 * Program the OCSC matrix with the RGB bias suppressed on the rear MPO
 * plane, then restore the caller's matrix. matrix[3], [7] and [11] hold the
 * same per-channel brightness offset; they are saved from matrix[3],
 * cleared before programming (clearing lines elided in this extraction —
 * TODO confirm), and written back afterwards so the caller's matrix is
 * unchanged.
 */
2433 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2435 // Override rear plane RGB bias to fix MPO brightness
2436 uint16_t rgb_bias = matrix[3];
2441 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2442 matrix[3] = rgb_bias;
2443 matrix[7] = rgb_bias;
2444 matrix[11] = rgb_bias;
/*
 * Program the output color-space conversion for a pipe. When a CSC
 * adjustment matrix is enabled it is programmed (with the rear-plane MPO
 * bias fix when required); otherwise the DPP default CSC for the colorspace
 * is used.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2447 void dcn10_program_output_csc(struct dc *dc,
2448 struct pipe_ctx *pipe_ctx,
2449 enum dc_color_space colorspace,
2453 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2454 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2456 /* MPO is broken with RGB colorspaces when OCSC matrix
2457 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2458 * Blending adds offsets from front + rear to rear plane
2460 * Fix is to set RGB bias to 0 on rear plane, top plane
2461 * black value pixels add offset instead of rear + front
2464 int16_t rgb_bias = matrix[3];
2465 // matrix[3/7/11] are all the same offset value
2467 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2468 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2470 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2474 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2475 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
/*
 * Configure the DPP for a plane: program the input CSC (format, expansion
 * mode, input matrix, color space) and, when supported, the prescale
 * bias-and-scale parameters derived from the plane state.
 */
2479 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2481 struct dc_bias_and_scale bns_params = {0};
2483 // program the input csc
2484 dpp->funcs->dpp_setup(dpp,
2485 plane_state->format,
2486 EXPANSION_MODE_ZERO,
2487 plane_state->input_csc_color_matrix,
2488 plane_state->color_space,
2491 //set scale and bias registers
2492 build_prescale_params(&bns_params, plane_state);
2493 if (dpp->funcs->dpp_program_bias_and_scale)
2494 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
/*
 * Compute the MPCC background color for a pipe. When a visual-confirm debug
 * mode is active the color encodes the debug information (HDR / surface /
 * swizzle); otherwise it is the black color for the stream's output color
 * space. The result is programmed as the MPCC background when supported.
 */
2497 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2499 struct mpc *mpc = dc->res_pool->mpc;
2501 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2502 get_hdr_visual_confirm_color(pipe_ctx, color);
2503 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2504 get_surface_visual_confirm_color(pipe_ctx, color);
2505 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2506 get_surface_tile_visual_confirm_color(pipe_ctx, color);
2508 color_space_to_black_color(
2509 dc, pipe_ctx->stream->output_color_space, color);
2511 if (mpc->funcs->set_bg_color)
2512 mpc->funcs->set_bg_color(mpc, color, mpcc_id);
/*
 * (Re)configure the MPCC blending for a pipe. On partial updates only the
 * blend config is refreshed; on full updates the MPCC is removed from the
 * tree (if present) and re-inserted. The MPCC id is tied 1:1 to the HUBP
 * instance (see the init_hw note below).
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2515 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2517 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2518 struct mpcc_blnd_cfg blnd_cfg = {0};
2519 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2521 struct mpcc *new_mpcc;
2522 struct mpc *mpc = dc->res_pool->mpc;
2523 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
/* Per-pixel alpha only applies when there is a pipe blended underneath. */
2525 if (per_pixel_alpha)
2526 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2528 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2530 blnd_cfg.overlap_only = false;
2531 blnd_cfg.global_gain = 0xff;
2533 if (pipe_ctx->plane_state->global_alpha)
2534 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2536 blnd_cfg.global_alpha = 0xff;
2538 /* DCN1.0 has output CM before MPC which seems to screw with
2539 * pre-multiplied alpha.
2541 blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2542 pipe_ctx->stream->output_color_space)
2548 * Note: currently there is a bug in init_hw such that
2549 * on resume from hibernate, BIOS sets up MPCC0, and
2550 * we do mpcc_remove but the mpcc cannot go to idle
2551 * after remove. This cause us to pick mpcc1 here,
2552 * which causes a pstate hang for yet unknown reason.
2554 mpcc_id = hubp->inst;
2556 /* If there is no full update, don't need to touch MPC tree*/
2557 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2558 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2559 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2563 /* check if this MPCC is already being used */
2564 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2565 /* remove MPCC if being used */
2566 if (new_mpcc != NULL)
2567 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2569 if (dc->debug.sanity_checks)
2570 mpc->funcs->assert_mpcc_idle_before_connect(
2571 dc->res_pool->mpc, mpcc_id);
2573 /* Call MPC to insert new plane */
2574 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2581 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2583 ASSERT(new_mpcc != NULL);
/* Record the OPP/MPCC binding on the HUBP for later lookups. */
2585 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2586 hubp->mpcc_id = mpcc_id;
/*
 * Program the DPP scaler from the pipe's precomputed scl_data; line-buffer
 * alpha is enabled only when per-pixel alpha blending is in effect (plane
 * requests it and a bottom pipe exists).
 */
2589 static void update_scaler(struct pipe_ctx *pipe_ctx)
2591 bool per_pixel_alpha =
2592 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2594 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2595 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2596 /* scaler configuration */
2597 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2598 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
/*
 * Program the DCHUB/HUBP and DPP for a plane update, gating each piece of
 * work on the plane's update_flags: DPP clock ratio, DLG/TTU/VTG setup,
 * input CSC, MPCC blending, scaler, viewport, cursor, gamut remap, output
 * CSC, surface config, and finally the surface address and unblank.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2601 static void dcn10_update_dchubp_dpp(
2603 struct pipe_ctx *pipe_ctx,
2604 struct dc_state *context)
2606 struct dce_hwseq *hws = dc->hwseq;
2607 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2608 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2609 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2610 struct plane_size size = plane_state->plane_size;
2611 unsigned int compat_level = 0;
2612 bool should_divided_by_2 = false;
2614 /* depends on DML calculation, DPP clock value may change dynamically */
2615 /* If request max dpp clk is lower than current dispclk, no need to
2618 if (plane_state->update_flags.bits.full_update) {
2620 /* new calculated dispclk, dppclk are stored in
2621 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2622 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2623 * dcn10_validate_bandwidth compute new dispclk, dppclk.
2624 * dispclk will put in use after optimize_bandwidth when
2625 * ramp_up_dispclk_with_dpp is called.
2626 * there are two places for dppclk be put in use. One location
2627 * is the same as the location as dispclk. Another is within
2628 * update_dchubp_dpp which happens between pre_bandwidth and
2629 * optimize_bandwidth.
2630 * dppclk updated within update_dchubp_dpp will cause new
2631 * clock values of dispclk and dppclk not be in use at the same
2632 * time. when clocks are decreased, this may cause dppclk is
2633 * lower than previous configuration and let pipe stuck.
2634 * for example, eDP + external dp, change resolution of DP from
2635 * 1920x1080x144hz to 1280x960x60hz.
2636 * before change: dispclk = 337889 dppclk = 337889
2637 * change mode, dcn10_validate_bandwidth calculate
2638 * dispclk = 143122 dppclk = 143122
2639 * update_dchubp_dpp be executed before dispclk be updated,
2640 * dispclk = 337889, but dppclk use new value dispclk /2 =
2641 * 168944. this will cause pipe pstate warning issue.
2642 * solution: between pre_bandwidth and optimize_bandwidth, while
2643 * dispclk is going to be decreased, keep dppclk = dispclk
2645 if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2646 dc->clk_mgr->clks.dispclk_khz)
2647 should_divided_by_2 = false;
2649 should_divided_by_2 =
2650 context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2651 dc->clk_mgr->clks.dispclk_khz / 2;
2653 dpp->funcs->dpp_dppclk_control(
2655 should_divided_by_2,
2658 if (dc->res_pool->dccg)
2659 dc->res_pool->dccg->funcs->update_dpp_dto(
2662 pipe_ctx->plane_res.bw.dppclk_khz);
/* Track the effective DPP clock in the clock manager state. */
2664 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2665 dc->clk_mgr->clks.dispclk_khz / 2 :
2666 dc->clk_mgr->clks.dispclk_khz;
2669 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2670 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2671 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2673 if (plane_state->update_flags.bits.full_update) {
2674 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2676 hubp->funcs->hubp_setup(
2678 &pipe_ctx->dlg_regs,
2679 &pipe_ctx->ttu_regs,
2681 &pipe_ctx->pipe_dlg_param);
2682 hubp->funcs->hubp_setup_interdependent(
2684 &pipe_ctx->dlg_regs,
2685 &pipe_ctx->ttu_regs);
/* Surface size follows the scaled viewport, not the full plane size. */
2688 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2690 if (plane_state->update_flags.bits.full_update ||
2691 plane_state->update_flags.bits.bpp_change)
2692 dcn10_update_dpp(dpp, plane_state);
2694 if (plane_state->update_flags.bits.full_update ||
2695 plane_state->update_flags.bits.per_pixel_alpha_change ||
2696 plane_state->update_flags.bits.global_alpha_change)
2697 hws->funcs.update_mpcc(dc, pipe_ctx);
2699 if (plane_state->update_flags.bits.full_update ||
2700 plane_state->update_flags.bits.per_pixel_alpha_change ||
2701 plane_state->update_flags.bits.global_alpha_change ||
2702 plane_state->update_flags.bits.scaling_change ||
2703 plane_state->update_flags.bits.position_change) {
2704 update_scaler(pipe_ctx);
2707 if (plane_state->update_flags.bits.full_update ||
2708 plane_state->update_flags.bits.scaling_change ||
2709 plane_state->update_flags.bits.position_change) {
2710 hubp->funcs->mem_program_viewport(
2712 &pipe_ctx->plane_res.scl_data.viewport,
2713 &pipe_ctx->plane_res.scl_data.viewport_c);
/* Reprogram the cursor if one is set (non-zero address). */
2716 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2717 dc->hwss.set_cursor_position(pipe_ctx);
2718 dc->hwss.set_cursor_attribute(pipe_ctx);
2720 if (dc->hwss.set_cursor_sdr_white_level)
2721 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2724 if (plane_state->update_flags.bits.full_update) {
2726 dc->hwss.program_gamut_remap(pipe_ctx);
2728 dc->hwss.program_output_csc(dc,
2730 pipe_ctx->stream->output_color_space,
2731 pipe_ctx->stream->csc_color_matrix.matrix,
2732 pipe_ctx->stream_res.opp->inst);
2735 if (plane_state->update_flags.bits.full_update ||
2736 plane_state->update_flags.bits.pixel_format_change ||
2737 plane_state->update_flags.bits.horizontal_mirror_change ||
2738 plane_state->update_flags.bits.rotation_change ||
2739 plane_state->update_flags.bits.swizzle_change ||
2740 plane_state->update_flags.bits.dcc_change ||
2741 plane_state->update_flags.bits.bpp_change ||
2742 plane_state->update_flags.bits.scaling_change ||
2743 plane_state->update_flags.bits.plane_size_change) {
2744 hubp->funcs->hubp_program_surface_config(
2746 plane_state->format,
2747 &plane_state->tiling_info,
2749 plane_state->rotation,
2751 plane_state->horizontal_mirror,
2755 hubp->power_gated = false;
2757 hws->funcs.update_plane_addr(dc, pipe_ctx);
2759 if (is_pipe_tree_visible(pipe_ctx))
2760 hubp->funcs->set_blank(hubp, false);
/*
 * Blank or unblank a pipe's pixel data. When blanking, the OTG blank color
 * is programmed to the stream colorspace's black and ABM is handled per
 * direction (restore ABM level on unblank, immediate-disable on blank).
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2763 void dcn10_blank_pixel_data(
2765 struct pipe_ctx *pipe_ctx,
2768 enum dc_color_space color_space;
2769 struct tg_color black_color = {0};
2770 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2771 struct dc_stream_state *stream = pipe_ctx->stream;
2773 /* program otg blank color */
2774 color_space = stream->output_color_space;
2775 color_space_to_black_color(dc, color_space, &black_color);
2778 * The way 420 is packed, 2 channels carry Y component, 1 channel
2779 * alternate between Cb and Cr, so both channels need the pixel
2782 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2783 black_color.color_r_cr = black_color.color_g_y;
2786 if (stream_res->tg->funcs->set_blank_color)
2787 stream_res->tg->funcs->set_blank_color(
2792 if (stream_res->tg->funcs->set_blank)
2793 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2794 if (stream_res->abm) {
2795 dc->hwss.set_pipe(pipe_ctx);
2796 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2799 dc->hwss.set_abm_immediate_disable(pipe_ctx);
/* Wait for VBLANK before applying the blank to avoid mid-frame change. */
2800 if (stream_res->tg->funcs->set_blank) {
2801 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2802 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
/*
 * Program the DPP HDR multiplier from the plane's hdr_mult fixed-point
 * value, converted to the hardware's 6.12 custom float format. A zero
 * multiplier keeps the 1.0 default (0x1f000).
 */
2807 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2809 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2810 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2811 struct custom_float_format fmt;
2813 fmt.exponenta_bits = 6;
2814 fmt.mantissa_bits = 12;
2818 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2819 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2821 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2822 pipe_ctx->plane_res.dpp, hw_mult);
/*
 * Full pipe programming entry point: for a top pipe, program global sync,
 * VTG params, vupdate interrupt and blanking; then program HUBP/DPP, HDR
 * multiplier, and the input/output transfer functions as update flags
 * require.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2825 void dcn10_program_pipe(
2827 struct pipe_ctx *pipe_ctx,
2828 struct dc_state *context)
2830 struct dce_hwseq *hws = dc->hwseq;
2832 if (pipe_ctx->top_pipe == NULL) {
2833 bool blank = !is_pipe_tree_visible(pipe_ctx);
2835 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2836 pipe_ctx->stream_res.tg,
2837 pipe_ctx->pipe_dlg_param.vready_offset,
2838 pipe_ctx->pipe_dlg_param.vstartup_start,
2839 pipe_ctx->pipe_dlg_param.vupdate_offset,
2840 pipe_ctx->pipe_dlg_param.vupdate_width);
2842 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2843 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2845 if (hws->funcs.setup_vupdate_interrupt)
2846 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2848 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2851 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2852 dcn10_enable_plane(dc, pipe_ctx, context);
2854 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2856 hws->funcs.set_hdr_multiplier(pipe_ctx);
2858 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2859 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2860 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2861 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2863 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2864 * only do gamma programming for full update.
2865 * TODO: This can be further optimized/cleaned up
2866 * Always call this for now since it does memcmp inside before
2867 * doing heavy calculation and programming
2869 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2870 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream)
/*
 * For every enabled top pipe with a stream and plane, wait through VBLANK
 * then VACTIVE so a VUPDATE is guaranteed to have occurred (see comment
 * below on why waiting for OTG_UPDATE_PENDING alone is insufficient).
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2873 void dcn10_wait_for_pending_cleared(struct dc *dc,
2874 struct dc_state *context)
2876 struct pipe_ctx *pipe_ctx;
2877 struct timing_generator *tg;
2880 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2881 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2882 tg = pipe_ctx->stream_res.tg;
2885 * Only wait for top pipe's tg penindg bit
2886 * Also skip if pipe is disabled.
2888 if (pipe_ctx->top_pipe ||
2889 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2890 !tg->funcs->is_tg_enabled(tg))
2894 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2895 * For some reason waiting for OTG_UPDATE_PENDING cleared
2896 * seems to not trigger the update right away, and if we
2897 * lock again before VUPDATE then we don't get a separated
2900 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2901 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
/*
 * Post-unlock front-end programming: apply the false-OPTC-underflow
 * workaround on plane-less OTG-leading pipes, disable pipes flagged for
 * disable (and optimize bandwidth once any was disabled), and apply the
 * DEGVIDCN10_254 hubbub workaround when active.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2905 void dcn10_post_unlock_program_front_end(
2907 struct dc_state *context)
2911 DC_LOGGER_INIT(dc->ctx->logger);
2913 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2914 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2916 if (!pipe_ctx->top_pipe &&
2917 !pipe_ctx->prev_odm_pipe &&
2919 struct timing_generator *tg = pipe_ctx->stream_res.tg;
2921 if (context->stream_status[i].plane_count == 0)
2922 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
/* NOTE(review): disable is read from the new context but the plane is
 * disabled on dc->current_state's pipe — intentional handoff, presumably. */
2926 for (i = 0; i < dc->res_pool->pipe_count; i++)
2927 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2928 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2930 for (i = 0; i < dc->res_pool->pipe_count; i++)
2931 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2932 dc->hwss.optimize_bandwidth(dc, context);
2936 if (dc->hwseq->wa.DEGVIDCN10_254)
2937 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
/*
 * Workaround for HW frame-packed stereo timings: when any stream uses
 * TIMING_3D_FORMAT_HW_FRAME_PACKING, disallow hubbub self-refresh.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2940 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2944 for (i = 0; i < context->stream_count; i++) {
2945 if (context->streams[i]->timing.timing_3d_format
2946 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2950 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
/*
 * Raise clocks/watermarks ahead of a mode change: update clocks via the
 * clock manager (skipped on FPGA), program hubbub watermarks (recording
 * whether a later optimize pass is required), and apply the stereo
 * frame-pack workaround.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2956 void dcn10_prepare_bandwidth(
2958 struct dc_state *context)
2960 struct dce_hwseq *hws = dc->hwseq;
2961 struct hubbub *hubbub = dc->res_pool->hubbub;
2963 if (dc->debug.sanity_checks)
2964 hws->funcs.verify_allow_pstate_change_high(dc);
2966 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2967 if (context->stream_count == 0)
2968 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2970 dc->clk_mgr->funcs->update_clocks(
/* program_watermarks returns whether watermarks changed such that a later
 * optimize_bandwidth pass is needed. */
2976 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
2977 &context->bw_ctx.bw.dcn.watermarks,
2978 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2980 dcn10_stereo_hw_frame_pack_wa(dc, context);
2982 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2983 dcn_bw_notify_pplib_of_wm_ranges(dc);
2985 if (dc->debug.sanity_checks)
2986 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Lower clocks/watermarks after a mode change has settled; mirrors
 * dcn10_prepare_bandwidth but discards the program_watermarks result.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
2989 void dcn10_optimize_bandwidth(
2991 struct dc_state *context)
2993 struct dce_hwseq *hws = dc->hwseq;
2994 struct hubbub *hubbub = dc->res_pool->hubbub;
2996 if (dc->debug.sanity_checks)
2997 hws->funcs.verify_allow_pstate_change_high(dc);
2999 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3000 if (context->stream_count == 0)
3001 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3003 dc->clk_mgr->funcs->update_clocks(
3009 hubbub->funcs->program_watermarks(hubbub,
3010 &context->bw_ctx.bw.dcn.watermarks,
3011 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3014 dcn10_stereo_hw_frame_pack_wa(dc, context);
3016 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
3017 dcn_bw_notify_pplib_of_wm_ranges(dc);
3019 if (dc->debug.sanity_checks)
3020 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Program DRR (dynamic refresh rate) v_total min/mid/max on each pipe's TG,
 * and when DRR is active (both max and min non-zero) arm the static-screen
 * trigger event used for manual refresh control.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
3023 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3024 int num_pipes, struct dc_crtc_timing_adjust adjust)
3027 struct drr_params params = {0};
3028 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3029 unsigned int event_triggers = 0x800;
3030 // Note DRR trigger events are generated regardless of whether num frames met.
3031 unsigned int num_frames = 2;
3033 params.vertical_total_max = adjust.v_total_max;
3034 params.vertical_total_min = adjust.v_total_min;
3035 params.vertical_total_mid = adjust.v_total_mid;
3036 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3037 /* TODO: If multiple pipes are to be supported, you need
3038 * some GSL stuff. Static screen triggers may be programmed differently
3041 for (i = 0; i < num_pipes; i++) {
3042 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3043 pipe_ctx[i]->stream_res.tg, &params);
3044 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3045 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3046 pipe_ctx[i]->stream_res.tg,
3047 event_triggers, num_frames);
/*
 * Read the current CRTC position from each pipe's timing generator.
 * Each iteration overwrites *position, so only the last pipe's value
 * survives (see the TODO on multi-pipe handling).
 */
3051 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3053 struct crtc_position *position)
3057 /* TODO: handle pipes > 1
3059 for (i = 0; i < num_pipes; i++)
3060 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
/*
 * Translate the generic static-screen trigger params into a TG trigger
 * bitmask (bit values set in elided lines of this extraction) and program
 * it on every pipe.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
3063 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3064 int num_pipes, const struct dc_static_screen_params *params)
3067 unsigned int triggers = 0;
3069 if (params->triggers.surface_update)
3071 if (params->triggers.cursor_update)
3073 if (params->triggers.force_trigger)
3076 for (i = 0; i < num_pipes; i++)
3077 pipe_ctx[i]->stream_res.tg->funcs->
3078 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3079 triggers, params->num_frames);
/*
 * Derive crtc_stereo_flags from the stream's 3D timing/view format:
 * stereo programming is enabled only for frame-sequential viewing with a
 * stereo-capable timing; DP sync is disabled through passive converter
 * dongles; frame-packed timings set FRAME_PACKED.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
3082 static void dcn10_config_stereo_parameters(
3083 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3085 enum view_3d_format view_format = stream->view_format;
3086 enum dc_timing_3d_format timing_3d_format =\
3087 stream->timing.timing_3d_format;
3088 bool non_stereo_timing = false;
3090 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3091 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3092 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3093 non_stereo_timing = true;
3095 if (non_stereo_timing == false &&
3096 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3098 flags->PROGRAM_STEREO = 1;
3099 flags->PROGRAM_POLARITY = 1;
3100 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3101 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3102 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3103 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3104 enum display_dongle_type dongle = \
3105 stream->link->ddc->dongle_type;
3106 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3107 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3108 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3109 flags->DISABLE_STEREO_DP_SYNC = 1;
3111 flags->RIGHT_EYE_POLARITY =\
3112 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3113 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3114 flags->FRAME_PACKED = 1;
/*
 * Apply stereo configuration for a pipe: compute the stereo flags, drive
 * the sideband-FA GPIO accordingly, then program stereo on the OPP and TG.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
3120 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3122 struct crtc_stereo_flags flags = { 0 };
3123 struct dc_stream_state *stream = pipe_ctx->stream;
3125 dcn10_config_stereo_parameters(stream, &flags);
3127 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3128 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3129 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3131 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3134 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3135 pipe_ctx->stream_res.opp,
3136 flags.PROGRAM_STEREO == 1,
3139 pipe_ctx->stream_res.tg->funcs->program_stereo(
3140 pipe_ctx->stream_res.tg,
/*
 * Look up the HUBP whose instance number matches mpcc_inst (MPCC ids map
 * 1:1 to HUBP instances in this sequencer; see dcn10_update_mpcc).
 */
3147 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3151 for (i = 0; i < res_pool->pipe_count; i++) {
3152 if (res_pool->hubps[i]->inst == mpcc_inst)
3153 return res_pool->hubps[i];
/*
 * For every MPCC with a pending disconnect on this pipe's OPP, wait for the
 * MPCC to go idle, clear the pending flag, and blank the matching HUBP.
 * NOTE(review): extraction elides interior lines; code left byte-identical.
 */
3159 void dcn10_wait_for_mpcc_disconnect(
3161 struct resource_pool *res_pool,
3162 struct pipe_ctx *pipe_ctx)
3164 struct dce_hwseq *hws = dc->hwseq;
3167 if (dc->debug.sanity_checks) {
3168 hws->funcs.verify_allow_pstate_change_high(dc);
3171 if (!pipe_ctx->stream_res.opp)
3174 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3175 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3176 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3178 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3179 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3180 hubp->funcs->set_blank(hubp, true);
3184 if (dc->debug.sanity_checks) {
3185 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * dcn10_dummy_display_power_gating - stub implementation of the display
 * power-gating hook for DCN10.
 *
 * The body is elided in this view; presumably it performs no gating and
 * returns a constant success value — TODO confirm against the full source.
 */
3190 bool dcn10_dummy_display_power_gating(
3192 uint8_t controller_id,
3193 struct dc_bios *dcb,
3194 enum pipe_gating_control power_gating)
3199 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3201 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3202 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3204 struct dc *dc = plane_state->ctx->dc;
3206 if (plane_state == NULL)
3209 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3210 pipe_ctx->plane_res.hubp);
3212 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3215 plane_state->status.current_address = plane_state->status.requested_address;
3217 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3218 tg->funcs->is_stereo_left_eye) {
3219 plane_state->status.is_right_eye =
3220 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3223 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3224 struct dce_hwseq *hwseq = dc->hwseq;
3225 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3226 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3228 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3229 struct hubbub *hubbub = dc->res_pool->hubbub;
3231 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3232 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3237 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3239 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3241 /* In DCN, this programming sequence is owned by the hubbub */
3242 hubbub->funcs->update_dchub(hubbub, dh_data);
/*
 * dcn10_can_pipe_disable_cursor - decide whether this pipe's HW cursor can
 * be disabled because a pipe above it fully covers this pipe's recout.
 *
 * Walks the top_pipe chain; for each visible plane on a different layer it
 * builds that plane's recout rectangle (merging pipe-split halves of the
 * same layer horizontally) and checks whether it fully contains this pipe's
 * recout (r1). The containment result / return statements are elided in
 * this view — presumably containment returns true. TODO confirm.
 */
3245 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3247 struct pipe_ctx *test_pipe, *split_pipe;
3248 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
/* r1 = this pipe's recout; r1_r / r1_b are its right and bottom edges. */
3249 struct rect r1 = scl_data->recout, r2, r2_half;
3250 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3251 int cur_layer = pipe_ctx->plane_state->layer_index;
3254 * Disable the cursor if there's another pipe above this with a
3255 * plane that contains this pipe's viewport to prevent double cursor
3256 * and incorrect scaling artifacts.
3258 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3259 test_pipe = test_pipe->top_pipe) {
3260 // Skip invisible layer and pipe-split plane on same layer
3261 if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
/* r2 = candidate covering plane's recout; r2_r / r2_b its far edges. */
3264 r2 = test_pipe->plane_res.scl_data.recout;
3265 r2_r = r2.x + r2.width;
3266 r2_b = r2.y + r2.height;
3267 split_pipe = test_pipe;
3270 * There is another half plane on same layer because of
3271 * pipe-split, merge together per same height.
3273 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3274 split_pipe = split_pipe->top_pipe)
3275 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
/* Union the split half into r2 horizontally (widths add, x is the min). */
3276 r2_half = split_pipe->plane_res.scl_data.recout;
3277 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3278 r2.width = r2.width + r2_half.width;
3279 r2_r = r2.x + r2.width;
/* r1 fully inside r2 => the upper plane covers this pipe's recout. */
3283 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
/*
 * dcn10_set_cursor_position - translate the stream-space cursor position
 * into plane space (handling scaling, source clipping, rotation, mirroring,
 * pipe split and ODM combine) and program HUBP and DPP with the result.
 *
 * NOTE(review): many lines of this function are elided in this view
 * (else-branches, closing braces, some assignments). Also the final two
 * calls show mojibake ("¶m"), which presumably was "&param" before an
 * HTML-entity mangling pass — confirm against the full source.
 */
3290 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3292 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3293 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3294 struct dpp *dpp = pipe_ctx->plane_res.dpp;
/* Parameters the HW blocks need to convert/validate the position. */
3295 struct dc_cursor_mi_param param = {
3296 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3297 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3298 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3299 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3300 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3301 .rotation = pipe_ctx->plane_state->rotation,
3302 .mirror = pipe_ctx->plane_state->horizontal_mirror
/* Split/combine topology affects the rotation math further below. */
3304 bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
3305 (pipe_ctx->bottom_pipe != NULL);
3306 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3307 (pipe_ctx->prev_odm_pipe != NULL);
3309 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3310 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3311 int x_pos = pos_cpy.x;
3312 int y_pos = pos_cpy.y;
3315 * DC cursor is stream space, HW cursor is plane space and drawn
3316 * as part of the framebuffer.
3318 * Cursor position can't be negative, but hotspot can be used to
3319 * shift cursor out of the plane bounds. Hotspot must be smaller
3320 * than the cursor size.
3324 * Translate cursor from stream space to plane space.
3326 * If the cursor is scaled then we need to scale the position
3327 * to be in the approximately correct place. We can't do anything
3328 * about the actual size being incorrect, that's a limitation of
/* For 90/270 rotation the src axes are swapped relative to dst. */
3331 if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3332 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3333 pipe_ctx->plane_state->dst_rect.width;
3334 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3335 pipe_ctx->plane_state->dst_rect.height;
3337 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3338 pipe_ctx->plane_state->dst_rect.width;
3339 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3340 pipe_ctx->plane_state->dst_rect.height;
3344 * If the cursor's source viewport is clipped then we need to
3345 * translate the cursor to appear in the correct position on
3348 * This translation isn't affected by scaling so it needs to be
3349 * done *after* we adjust the position for the scale factor.
3351 * This is only done by opt-in for now since there are still
3352 * some usecases like tiled display that might enable the
3353 * cursor on both streams while expecting dc to clip it.
3355 if (pos_cpy.translate_by_source) {
3356 x_pos += pipe_ctx->plane_state->src_rect.x;
3357 y_pos += pipe_ctx->plane_state->src_rect.y;
3361 * If the position is negative then we need to add to the hotspot
3362 * to shift the cursor outside the plane.
/* Negative-position guards around these lines are elided in this view. */
3366 pos_cpy.x_hotspot -= x_pos;
3371 pos_cpy.y_hotspot -= y_pos;
3375 pos_cpy.x = (uint32_t)x_pos;
3376 pos_cpy.y = (uint32_t)y_pos;
/* Progressive-video surfaces never show the HW cursor. */
3378 if (pipe_ctx->plane_state->address.type
3379 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3380 pos_cpy.enable = false;
/* A fully-covering upper plane also lets us disable the cursor. */
3382 if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3383 pos_cpy.enable = false;
3385 // Swap axis and mirror horizontally
3386 if (param.rotation == ROTATION_ANGLE_90) {
3387 uint32_t temp_x = pos_cpy.x;
3389 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3390 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3393 // Swap axis and mirror vertically
3394 else if (param.rotation == ROTATION_ANGLE_270) {
3395 uint32_t temp_y = pos_cpy.y;
3396 int viewport_height =
3397 pipe_ctx->plane_res.scl_data.viewport.height;
/* viewport_y declaration line is elided; this is its initializer. */
3399 pipe_ctx->plane_res.scl_data.viewport.y;
3402 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3403 * For pipe split cases:
3404 * - apply offset of viewport.y to normalize pos_cpy.x
3405 * - calculate the pos_cpy.y as before
3406 * - shift pos_cpy.y back by same offset to get final value
3407 * - since we iterate through both pipes, use the lower
3408 * viewport.y for offset
3409 * For non pipe split cases, use the same calculation for
3410 * pos_cpy.y as the 180 degree rotation case below,
3411 * but use pos_cpy.x as our input because we are rotating
3414 if (pipe_split_on || odm_combine_on) {
3415 int pos_cpy_x_offset;
3416 int other_pipe_viewport_y;
/* Pick the sibling pipe's viewport.y: bottom/top for split, ... */
3418 if (pipe_split_on) {
3419 if (pipe_ctx->bottom_pipe) {
3420 other_pipe_viewport_y =
3421 pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3423 other_pipe_viewport_y =
3424 pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
/* ... next/prev ODM pipe for combine. */
3427 if (pipe_ctx->next_odm_pipe) {
3428 other_pipe_viewport_y =
3429 pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3431 other_pipe_viewport_y =
3432 pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
/* Normalize with the lower of the two viewport.y values. */
3435 pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3436 other_pipe_viewport_y : viewport_y;
3437 pos_cpy.x -= pos_cpy_x_offset;
3438 if (pos_cpy.x > viewport_height) {
3439 pos_cpy.x = pos_cpy.x - viewport_height;
3440 pos_cpy.y = viewport_height - pos_cpy.x;
3442 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
/* Undo the normalization offset on the final y. */
3444 pos_cpy.y += pos_cpy_x_offset;
3446 pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3450 // Mirror horizontally and vertically
3451 else if (param.rotation == ROTATION_ANGLE_180) {
3452 int viewport_width =
3453 pipe_ctx->plane_res.scl_data.viewport.width;
/* viewport_x declaration line is elided; this is its initializer. */
3455 pipe_ctx->plane_res.scl_data.viewport.x;
3457 if (pipe_split_on || odm_combine_on) {
3458 if (pos_cpy.x >= viewport_width + viewport_x) {
3459 pos_cpy.x = 2 * viewport_width
3460 - pos_cpy.x + 2 * viewport_x;
3462 uint32_t temp_x = pos_cpy.x;
3464 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
/* Keep the cursor attached to the correct split half near the seam. */
3465 if (temp_x >= viewport_x +
3466 (int)hubp->curs_attr.width || pos_cpy.x
3467 <= (int)hubp->curs_attr.width +
3468 pipe_ctx->plane_state->src_rect.x) {
3469 pos_cpy.x = temp_x + viewport_width;
3473 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3477 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3479 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3480 * pos_cpy.y_new = viewport.y + delta_from_bottom
3482 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3484 pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3485 pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
/* Program both HW blocks with the final plane-space position.
 * NOTE(review): "¶m" below is mojibake for "&param" — TODO restore. */
3488 hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
3489 dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
3492 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3494 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3496 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3497 pipe_ctx->plane_res.hubp, attributes);
3498 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3499 pipe_ctx->plane_res.dpp, attributes);
/*
 * dcn10_set_cursor_sdr_white_level - scale the cursor brightness so an SDR
 * cursor renders correctly on an HDR stream.
 *
 * When the stream's SDR white level exceeds the 80-nit reference, computes
 * the ratio (sdr_white_level / 80) as a half-precision custom float and
 * programs it as the DPP's optional cursor scale attribute. No-op when the
 * DPP does not implement set_optional_cursor_attributes.
 * NOTE(review): a few lines (fmt.sign, the early return, opt_attr.bias)
 * are elided in this view.
 */
3502 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3504 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3505 struct fixed31_32 multiplier;
3506 struct dpp_cursor_attributes opt_attr = { 0 };
3507 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3508 struct custom_float_format fmt;
/* DPP hook is optional; bail out when it is absent. */
3510 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
/* Half-precision layout: 5 exponent bits, 10 mantissa bits. */
3513 fmt.exponenta_bits = 5;
3514 fmt.mantissa_bits = 10;
/* 80 nits is the SDR reference white; only scale above that. */
3517 if (sdr_white_level > 80) {
3518 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3519 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3522 opt_attr.scale = hw_scale;
3525 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3526 pipe_ctx->plane_res.dpp, &opt_attr);
3530 * apply_front_porch_workaround TODO FPGA still need?
3532 * This is a workaround for a bug that has existed since R5xx and has not been
3533 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3535 static void apply_front_porch_workaround(
3536 struct dc_crtc_timing *timing)
3538 if (timing->flags.INTERLACE == 1) {
3539 if (timing->v_front_porch < 2)
3540 timing->v_front_porch = 2;
3542 if (timing->v_front_porch < 1)
3543 timing->v_front_porch = 1;
/*
 * dcn10_get_vupdate_offset_from_vsync - compute the scanline on which the
 * VUPDATE event occurs, expressed as an offset from VSYNC.
 *
 * Works on a patched copy of the stream timing (front porch clamped by
 * apply_front_porch_workaround). Returns asic_blank_end minus the DLG
 * vstartup offset plus one; the result may be negative (callers check).
 * NOTE(review): parts of the asic_blank_end expression (the vesa_sync_start
 * subtraction and the interlace_factor multiply) are elided in this view.
 */
3547 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3549 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3550 struct dc_crtc_timing patched_crtc_timing;
3551 int vesa_sync_start;
3553 int interlace_factor;
3554 int vertical_line_start;
/* Work on a local copy so the stream's timing is never modified. */
3556 patched_crtc_timing = *dc_crtc_timing;
3557 apply_front_porch_workaround(&patched_crtc_timing);
3559 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3561 vesa_sync_start = patched_crtc_timing.v_addressable +
3562 patched_crtc_timing.v_border_bottom +
3563 patched_crtc_timing.v_front_porch;
3565 asic_blank_end = (patched_crtc_timing.v_total -
3567 patched_crtc_timing.v_border_top)
3570 vertical_line_start = asic_blank_end -
3571 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3573 return vertical_line_start;
/*
 * dcn10_calc_vupdate_position - compute the start/end scanlines for a
 * periodic vertical interrupt referenced to the VUPDATE event.
 *
 * Takes the stream's configured line offset relative to VUPDATE, adjusts it
 * by one line toward zero (hardware fires one line early/late), adds the
 * VUPDATE-from-VSYNC offset, and wraps the result into [0, v_total). The
 * window is 2 lines wide (*end_line = *start_line + 2), wrapped as well.
 * NOTE(review): the parameter list (dc, end_line) and the start_position
 * declaration are partly elided in this view.
 */
3576 void dcn10_calc_vupdate_position(
3578 struct pipe_ctx *pipe_ctx,
3579 uint32_t *start_line,
3582 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3583 int vline_int_offset_from_vupdate =
3584 pipe_ctx->stream->periodic_interrupt0.lines_offset;
3585 int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
/* Shift the offset one line toward zero to match HW firing behavior. */
3588 if (vline_int_offset_from_vupdate > 0)
3589 vline_int_offset_from_vupdate--;
3590 else if (vline_int_offset_from_vupdate < 0)
3591 vline_int_offset_from_vupdate++;
3593 start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
/* Wrap a negative start position into the previous frame. */
3595 if (start_position >= 0)
3596 *start_line = start_position;
3598 *start_line = dc_crtc_timing->v_total + start_position - 1;
3600 *end_line = *start_line + 2;
/* Wrap the end of the 2-line window past v_total (handling elided). */
3602 if (*end_line >= dc_crtc_timing->v_total)
/*
 * dcn10_cal_vline_position - resolve the start/end scanlines for the
 * selected vline interrupt based on its configured reference point.
 *
 * Picks the ref_point from periodic_interrupt0 or periodic_interrupt1
 * according to the vline selector, then dispatches: START_V_UPDATE delegates
 * to dcn10_calc_vupdate_position; other reference points (e.g. START_V_SYNC)
 * are handled in switch arms elided from this view.
 */
3606 static void dcn10_cal_vline_position(
3608 struct pipe_ctx *pipe_ctx,
3609 enum vline_select vline,
3610 uint32_t *start_line,
3613 enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3615 if (vline == VLINE0)
3616 ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3617 else if (vline == VLINE1)
3618 ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3620 switch (ref_point) {
3621 case START_V_UPDATE:
3622 dcn10_calc_vupdate_position(
3629 // Suppose to do nothing because vsync is 0;
/*
 * dcn10_setup_periodic_interrupt - program the timing generator's periodic
 * vertical interrupt for the selected vline.
 *
 * VLINE0: compute the start/end scanline window via
 * dcn10_cal_vline_position and program vertical interrupt 0.
 * VLINE1: program vertical interrupt 1 directly with the stream's
 * configured line offset (ref-point argument elided in this view).
 */
3637 void dcn10_setup_periodic_interrupt(
3639 struct pipe_ctx *pipe_ctx,
3640 enum vline_select vline)
3642 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3644 if (vline == VLINE0) {
3645 uint32_t start_line = 0;
3646 uint32_t end_line = 0;
3648 dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3650 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3652 } else if (vline == VLINE1) {
3653 pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3655 pipe_ctx->stream->periodic_interrupt1.lines_offset);
/*
 * dcn10_setup_vupdate_interrupt - arm vertical interrupt 2 at the VUPDATE
 * scanline for this pipe.
 *
 * The computed offset may be negative; the error-handling branch is elided
 * in this view (presumably it clamps/asserts before programming — TODO
 * confirm). Programming is skipped when the TG lacks the hook.
 */
3659 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3661 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3662 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3664 if (start_line < 0) {
/* setup_vertical_interrupt2 is an optional TG capability. */
3669 if (tg->funcs->setup_vertical_interrupt2)
3670 tg->funcs->setup_vertical_interrupt2(tg, start_line);
/*
 * dcn10_unblank_stream - unblank the stream encoder output for DP signals
 * and, for eDP panels, turn the backlight back on.
 *
 * For YCbCr 4:2:0 the pixel clock passed to the encoder is halved (two
 * pixels per clock). NOTE(review): "¶ms" below is mojibake for "&params"
 * from an HTML-entity mangling pass — confirm against the full source;
 * several lines (other params fields, closing braces) are elided here.
 */
3673 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3674 struct dc_link_settings *link_settings)
3676 struct encoder_unblank_param params = {0};
3677 struct dc_stream_state *stream = pipe_ctx->stream;
3678 struct dc_link *link = stream->link;
3679 struct dce_hwseq *hws = link->dc->hwseq;
3681 /* only 3 items below are used by unblank */
3682 params.timing = pipe_ctx->stream->timing;
3684 params.link_settings.link_rate = link_settings->link_rate;
3686 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
/* 4:2:0 carries two pixels per clock, so halve the pixel clock. */
3687 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3688 params.timing.pix_clk_100hz /= 2;
3689 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
/* eDP: re-enable the panel backlight now that the stream is live. */
3692 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3693 hws->funcs.edp_backlight_control(link, true);
/*
 * dcn10_send_immediate_sdp_message - forward a custom SDP message to the
 * stream encoder for immediate transmission.
 *
 * Only meaningful on DP signals; otherwise does nothing. The remaining call
 * arguments (message buffer and size) are elided in this view.
 */
3697 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3698 const uint8_t *custom_sdp_message,
3699 unsigned int sdp_message_size)
3701 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3702 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3703 pipe_ctx->stream_res.stream_enc,
/*
 * dcn10_set_clock - validate a requested clock frequency against the clock
 * manager's limits and, if acceptable, request the update.
 *
 * Returns DC_FAIL_UNSUPPORTED_1 when the clock manager cannot report clock
 * configs, DC_FAIL_CLK_EXCEED_MAX / DC_FAIL_CLK_BELOW_MIN /
 * DC_FAIL_CLK_BELOW_CFG_REQUIRED when out of range, DC_ERROR_UNEXPECTED for
 * an unsupported clock_type. The clk_khz parameter line and the final
 * update_clocks arguments/return are elided in this view.
 * NOTE: "bw_requirequired_clock_khz" matches the field name declared in the
 * shared dc_clock_config struct — do not "fix" it here alone.
 */
3708 enum dc_status dcn10_set_clock(struct dc *dc,
3709 enum dc_clock_type clock_type,
3713 struct dc_state *context = dc->current_state;
3714 struct dc_clock_config clock_cfg = {0};
3715 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3717 if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3718 return DC_FAIL_UNSUPPORTED_1;
/* Query the valid range for this clock type under the current state. */
3720 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3721 context, clock_type, &clock_cfg);
3723 if (clk_khz > clock_cfg.max_clock_khz)
3724 return DC_FAIL_CLK_EXCEED_MAX;
3726 if (clk_khz < clock_cfg.min_clock_khz)
3727 return DC_FAIL_CLK_BELOW_MIN;
3729 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3730 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3732 /*update internal request clock for update clock use*/
3733 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3734 current_clocks->dispclk_khz = clk_khz;
3735 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3736 current_clocks->dppclk_khz = clk_khz;
/* Any other clock type is unsupported here (else branch elided). */
3738 return DC_ERROR_UNEXPECTED;
3740 if (dc->clk_mgr->funcs->update_clocks)
3741 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3747 void dcn10_get_clock(struct dc *dc,
3748 enum dc_clock_type clock_type,
3749 struct dc_clock_config *clock_cfg)
3751 struct dc_state *context = dc->current_state;
3753 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3754 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3758 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3760 struct resource_pool *pool = dc->res_pool;
3763 for (i = 0; i < pool->pipe_count; i++) {
3764 struct hubp *hubp = pool->hubps[i];
3765 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3767 hubp->funcs->hubp_read_state(hubp);
3770 dcc_en_bits[i] = s->dcc_en ? 1 : 0;