2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
57 #include "dce/dmub_outbox.h"
/* Logger hook compiled out in this translation unit. */
#define DC_LOGGER_INIT(logger)

/* Expand a register field name into its (shift, mask) pair from hwseq tables. */
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

/*print is 17 wide, first two characters are spaces*/
#define DTN_INFO_MICRO_SEC(ref_cycle) \
	print_microsec(dc_ctx, log_ctx, ref_cycle)

#define GAMMA_HW_POINTS_NUM 256

/* PGFSM power-state codes observed in DOMAINx_PG_STATUS (see *_pg_control). */
#define PGFSM_POWER_ON 0
#define PGFSM_POWER_OFF 2
/*
 * print_microsec - append a DCHUB reference-clock cycle count to the DTN
 * log as a fixed-point microsecond value.
 *
 * Conversion uses the resource pool's dchub_ref_clock_inKhz reduced to MHz.
 * NOTE(review): trailing lines of this function are not visible in this
 * chunk; the printed integer/fraction split should be confirmed against
 * the full source.
 */
static void print_microsec(struct dc_context *dc_ctx,
	struct dc_log_buffer_ctx *log_ctx,
	/* assumes dchub_ref_clock_inKhz >= 1000 so ref_clk_mhz != 0 — TODO confirm */
	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	static const unsigned int frac = 1000;
	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO("  %11d.%03d",
/*
 * dcn10_lock_all_pipes - (un)lock the timing generator of every enabled
 * top pipe via hwss.pipe_control_lock.
 *
 * Pipes that are not top pipes, that have no plane state in either the
 * current or the new context, or whose TG is disabled are skipped.
 * NOTE(review): both lock(true) and lock(false) calls are visible here;
 * presumably selected by a 'lock' parameter hidden by this truncated
 * view — TODO confirm against the full source.
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	struct pipe_ctx *pipe_ctx;
	struct pipe_ctx *old_pipe_ctx;
	struct timing_generator *tg;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		if (pipe_ctx->top_pipe ||
				(!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
				!tg->funcs->is_tg_enabled(tg))

		dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
		dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
/*
 * log_mpc_crc - dump the MPC and DPP CRC result registers to the DTN
 * log, guarded by register presence (not all ASICs expose them).
 */
static void log_mpc_crc(struct dc *dc,
		struct dc_log_buffer_ctx *log_ctx)
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
				REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
				REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
/*
 * dcn10_log_hubbub_state - dump the four HUBBUB watermark sets (data /
 * pte-meta urgent, self-refresh enter/exit, DRAM clock change) to the
 * DTN log, converted to microseconds via DTN_INFO_MICRO_SEC.
 */
static void dcn10_log_hubbub_state(struct dc *dc,
		struct dc_log_buffer_ctx *log_ctx)
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;

	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	/* Snapshot current watermark programming from the HUBBUB block. */
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
			" sr_enter sr_exit dram_clk_change\n");

	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		DTN_INFO_MICRO_SEC(s->dram_clk_change);
/*
 * dcn10_log_hubp_states - dump per-HUBP state to the DTN log in four
 * sections: general state, RQ (request) registers, DLG (delay/latency
 * generator) registers and TTU (time-to-underflow) registers.
 *
 * Reads each HUBP's cached register state (refreshed via
 * hubp_read_state) and prints one row per pipe per section.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;

		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Latch current HUBP registers into the cached state struct. */
		hubp->funcs->hubp_read_state(hubp);

		DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
				s->underflow_status);
		DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
		DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
		DTN_INFO_MICRO_SEC(s->qos_level_high_wm);

	/* Section 2: request-queue sizing registers (luma and chroma). */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
			pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
			rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
			rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
			rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
			rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
			rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
			rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
			rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
			rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);

	/* Section 3: delay/latency generator attributes. */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
			" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
			" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
			" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
			" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
			" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
			" x_rp_dlay x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
			" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
			" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
			pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
			dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
			dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
			dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
			dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
			dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
			dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
			dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
			dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
			dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
			dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
			dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
			dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
			dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
			dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
			dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
			dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
			dlg_regs->xfc_reg_remote_surface_flip_latency);

	/* Section 4: time-to-underflow / QoS attributes. */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
			" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
			" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
			pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
			ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
			ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
			ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
			ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
			ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
			ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
/*
 * dcn10_log_hw_state - dump a full snapshot of DCN display hardware
 * state to the DTN log: HUBBUB watermarks, HUBP state, DPP gamma/gamut
 * state, MPCC tree, OTG timing, DSC, stream/link encoders, calculated
 * clocks, MPC CRCs, and (when present) DP HPO stream/link encoders.
 */
void dcn10_log_hw_state(struct dc *dc,
		struct dc_log_buffer_ctx *log_ctx)
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* DPP section: gamma LUT modes and 3x4 gamut remap coefficients. */
	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
			" GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
			"C31 C32 C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
				"%8x %08xh %08xh %08xh %08xh %08xh %08xh",
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);

	/* MPCC section: one row per blending slot in the MPC tree. */
	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
			DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,

	/* OTG section: timing, polarity and underflow state per TG. */
	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 * TODO: Implement DCN-specific read_otg_state hooks.
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
				s.underflow_occurred_status,

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		s.dsc_bits_per_pixel);

	/* Stream encoder section (only encoders exposing enc_read_state). */
	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
			" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);

	/* Link encoder section: DPHY FEC and link-training state. */
	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);

	/* Calculated (not read-back) clock values from the current BW context. */
	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
			"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	if (pool->hpo_dp_stream_enc_count > 0) {
		DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
		for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
			struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
			struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

			if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
				hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

				DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
						hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
						hpo_dp_se_state.stream_enc_enabled,
						hpo_dp_se_state.otg_inst,
						(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
								((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
								(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
						(hpo_dp_se_state.component_depth == 0) ? 6 :
								((hpo_dp_se_state.component_depth == 1) ? 8 :
								(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
						hpo_dp_se_state.vid_stream_enabled,
						hpo_dp_se_state.sdp_enabled,
						hpo_dp_se_state.compressed_format,
						hpo_dp_se_state.mapped_to_link_enc);

	/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
	if (pool->hpo_dp_link_enc_count) {
		DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

		for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
			struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
			struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

			if (hpo_dp_link_enc->funcs->read_state) {
				hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
				DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
						hpo_dp_link_enc->inst,
						hpo_dp_le_state.link_enc_enabled,
						(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
							(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
							(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
						hpo_dp_le_state.lane_count,
						hpo_dp_le_state.stream_src[0],
						hpo_dp_le_state.slot_count[0],
						hpo_dp_le_state.vc_rate_x[0],
						hpo_dp_le_state.vc_rate_y[0]);
/*
 * dcn10_did_underflow_occur - test the sticky underflow bits on the
 * pipe's OTG and HUBP, clearing whichever one is set.
 * NOTE(review): return statements are not visible in this truncated
 * view; presumably returns true when either bit was set — TODO confirm.
 */
bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	if (tg->funcs->is_optc_underflow_occurred(tg)) {
		tg->funcs->clear_optc_underflow(tg);

	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
		hubp->funcs->hubp_clear_underflow(hubp);
/*
 * dcn10_enable_power_gating_plane - set the POWER_FORCEON bit on all
 * plane power domains: DOMAIN0/2/4/6 (HUBP, per dcn10_hubp_pg_control)
 * and DOMAIN1/3/5/7 (DPP, per dcn10_dpp_pg_control).
 * force_on = true keeps domains forced on, i.e. power gating disabled.
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool force_on = true; /* disable power gating */

	/* HUBP domains */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP domains */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
/*
 * dcn10_disable_vga - take all four display pipes out of legacy VGA mode.
 *
 * Reads D1..D4 VGA_CONTROL; if no pipe reports VGA mode enabled there is
 * nothing to do. Otherwise clears all four controls and kicks a VGA_TEST
 * render pass so DCHUBP timing updates correctly (see HW note below).
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 * VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
/**
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP on (un-gate it), false to gate it off.
 *
 * Enable or disable power gate in the specific DPP instance. DPP
 * instances use the odd power domains (DOMAIN1/3/5/7); each update is
 * followed by a REG_WAIT for the PGFSM to report the requested status.
 * No-op when DPP power gating is disabled via debug option or the
 * DOMAIN registers are absent on this ASIC.
 */
void dcn10_dpp_pg_control(
	struct dce_hwseq *hws,
	unsigned int dpp_inst,
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
	if (REG(DOMAIN1_PG_CONFIG) == 0)

		/* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,

		/* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,

		/* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,

		/* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
/**
 * dcn10_hubp_pg_control - HUBP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP on (un-gate it), false to gate it off.
 *
 * Enable or disable power gate in the specific HUBP instance. HUBP
 * instance N uses even power domain 2N (DOMAIN0/2/4/6); each update is
 * followed by a REG_WAIT for the PGFSM to report the requested status.
 * No-op when HUBP power gating is disabled via debug option or the
 * DOMAIN registers are absent on this ASIC.
 */
void dcn10_hubp_pg_control(
	struct dce_hwseq *hws,
	unsigned int hubp_inst,
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
	if (REG(DOMAIN0_PG_CONFIG) == 0)

	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
/*
 * power_on_plane_resources - un-gate the DPP and HUBP power domains for
 * @plane_id, enabling DPP root clocks first when that hook exists.
 * PG register writes are bracketed by DC_IP_REQUEST_CNTL.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	DC_LOGGER_INIT(hws->ctx->logger);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* raise IP request before touching PG controls, drop it after */
		REG_SET(DC_IP_REQUEST_CNTL, 0,

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				"Un-gated front end for pipe %d\n", plane_id);
/*
 * undo_DEGVIDCN10_253_wa - revert the DEGVIDCN10_253 stutter workaround:
 * blank HUBP0 and power-gate it again (bracketed by DC_IP_REQUEST_CNTL),
 * then clear the applied flag. No-op if the WA was never applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)

	hubp->funcs->set_blank(hubp, true);

	REG_SET(DC_IP_REQUEST_CNTL, 0,

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,

	hws->wa_state.DEGVIDCN10_253_applied = false;
/*
 * apply_DEGVIDCN10_253_wa - HW workaround DEGVIDCN10_253: when every
 * HUBP in the pool is power gated, power HUBP0 back on and unblank it so
 * stutter can be enabled. Skipped when stutter is disabled for debug or
 * the WA flag is not set; records application in wa_state for undo.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (dc->debug.disable_stutter)

	if (!hws->wa.DEGVIDCN10_253)

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
/*
 * dcn10_bios_golden_init - run VBIOS golden init for DCN: global
 * power-gating init for controller D0 followed by per-pipe disable,
 * then restore HUBBUB's allow-self-refresh force-enable if the command
 * table changed it (S0i3 resume / DF-sleep workaround).
 */
void dcn10_bios_golden_init(struct dc *dc)
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;

	bool allow_self_fresh_force_enable = true;

	/* some platforms do all of this inside the s0i3 WA hook instead */
	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))

	/* remember the pre-command-table self-refresh force-enable state */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);

	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);

	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
/*
 * false_optc_underflow_wa - workaround for a spurious OPTC underflow
 * raised while MPCC pipes of @stream disconnect: wait for the MPCC
 * disconnects, re-enable blank-data double buffering, and clear the
 * underflow sticky bit only when it was not already set on entry (a
 * pre-existing underflow is deliberately left for later inspection).
 */
static void false_optc_underflow_wa(
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
	if (!dc->hwseq->wa.false_optc_underflow)

	/* remember whether underflow was already flagged before the WA runs */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* only clear if the underflow appeared during this function */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
/*
 * calculate_vready_offset_for_group - return the largest vready_offset
 * among this pipe and every pipe connected to it (the bottom/top MPC
 * chains and the next/prev ODM chains), so the whole pipe group shares
 * a single VREADY point.
 */
static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
	struct pipe_ctx *other_pipe;
	int vready_offset = pipe->pipe_dlg_param.vready_offset;

	/* Always use the largest vready_offset of all connected pipes */
	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;

	return vready_offset;
/*
 * dcn10_enable_stream_timing - program and start the OTG for a stream's
 * top pipe: enable the OPTC clock, program the pixel clock and HDMI
 * symclk state, program OTG timing (using the group-wide VREADY offset),
 * set the blank color (duplicating Y into Cr for YCbCr 4:2:0 packing),
 * blank the OTG, then enable the CRTC.
 *
 * Child pipes (top_pipe != NULL) share the parent's back end and are
 * skipped. Returns DC_ERROR_UNEXPECTED when pixel-clock programming or
 * CRTC enable fails.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		return DC_ERROR_UNEXPECTED;

	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,

	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		return DC_ERROR_UNEXPECTED;

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */
/*
 * dcn10_reset_back_end_for_pipe - tear down the stream back end for a
 * pipe: run DPMS-off (or just disable audio if DPMS is already off and
 * the link inactive), release the azalia audio endpoint, and — for the
 * parent/top pipe only, since the back end is shared — disable ABM, the
 * CRTC, OPTC clock and DRR. Finally detaches the stream from the pipe
 * context when the pipe belongs to the current state.
 */
static void dcn10_reset_back_end_for_pipe(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
	struct dc_link *link;

	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
		/* OTG no longer references this link's symclk */
		pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;

	/* only detach the stream if this pipe_ctx lives in current_state */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)

	if (i == dc->res_pool->pipe_count)

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
/*
 * dcn10_hw_wa_force_recovery - HUBP underflow recovery workaround.
 *
 * When recovery is enabled via dc->debug.recovery_enabled, scans all pipes
 * for an asserted HUBP underflow status and then applies the register
 * toggle sequence documented inline below (blank HUBPs, soft-reset
 * DCHUBBUB, toggle HUBP_DISABLE, un-blank).
 */
static bool dcn10_hw_wa_force_recovery(struct dc *dc)
	bool need_recover = true;

	if (!dc->debug.recovery_enabled)

	/* Pass 1: look for any pipe reporting underflow. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
					/* one pipe underflow, we will reset all the pipes*/
					need_recover = true;

	/* Intended recovery register sequence:
	DCHUBP_CNTL:HUBP_BLANK_EN=1
	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
	DCHUBP_CNTL:HUBP_DISABLE=1
	DCHUBP_CNTL:HUBP_DISABLE=0
	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
	DCSURF_PRIMARY_SURFACE_ADDRESS
	DCHUBP_CNTL:HUBP_BLANK_EN=0
	*/

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, true);

	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
	hubbub1_soft_reset(dc->res_pool->hubbub, true);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, true);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
			/* NOTE(review): comment says DISABLE=0 but the call
			 * passes true — confirm against the intended register
			 * sequence above. */
			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, true);

	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
	hubbub1_soft_reset(dc->res_pool->hubbub, false);
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
			/* NOTE(review): comment says BLANK_EN=0 but the call
			 * passes true — confirm this is intentional. */
			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, true);
/*
 * dcn10_verify_allow_pstate_change_high - sanity check that the hardware
 * still allows p-state changes; on failure, optionally log hw state, trap
 * to the debugger, and attempt the forced recovery workaround once.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	/* Not all HUBBUB revisions implement this check. */
	if (!hubbub->funcs->verify_allow_pstate_change_high)

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		/* Re-verify after the forced recovery sequence ran. */
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
/* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/* This DPP owns no MPCC in the tree — nothing to disconnect. */
	if (mpcc_to_remove == NULL)

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	/* Disconnect requires a full optimization pass afterwards. */
	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
/**
 * dcn10_plane_atomic_power_down - Power down plane components.
 *
 * @dc: dc struct reference. used for grab hwseq.
 * @dpp: dpp struct reference.
 * @hubp: hubp struct reference.
 *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests for switch power gate are precisely controlled to avoid
 * problems. For this reason, power gate request is usually disabled. This
 * function first needs to enable the power gate request before disabling DPP
 * and HUBP. Finally, it disables the power gate request again.
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the power-gate request window. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		dpp->funcs->dpp_reset(dpp);

		/* Close the power-gate request window again. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				"Power gated front end %d\n", hubp->inst);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* MPCC disconnect (requested earlier) must finish before gating. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only when no MPCC remains attached. */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Scrub the pipe context so the pipe reads as completely free. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
/*
 * dcn10_disable_plane - disable and power-gate the front end for a pipe,
 * skipping pipes that have no HUBP or are already gated.
 */
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	/* Already gated (or never owned a HUBP): nothing to do. */
	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)

	hws->funcs.plane_atomic_disable(dc, pipe_ctx);

	apply_DEGVIDCN10_253_wa(dc);

	DC_LOG_DC("Power down front end %d\n",
					pipe_ctx->pipe_idx);
/*
 * dcn10_init_pipes - bring all pipes to a known disabled state at init.
 *
 * Blanks/locks enabled OTGs, resets DET sizes and the MPC muxes, then
 * disconnects and power-gates every front end that is not needed for
 * seamless boot.  Finally power-gates any DSC not attached to the one
 * running OPTC.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		/* Blank controller using driver code instead of
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
		    pipe_ctx->stream != NULL &&
		    pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
			    pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Rebuild a minimal pipe_ctx so disconnect/disable can run. */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);

		tg->funcs->tg_init(tg);

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obtain real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			/* Skip the DSC feeding the active OPTC's OPP(s). */
			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
/*
 * dcn10_init_hw - one-time hardware bring-up for the display core.
 *
 * Initializes clocks, DCCG, VGA disable and BIOS golden settings, derives
 * reference clocks from the BIOS fw_info, powers up link encoders, blanks
 * all DP displays, initializes pipes/audio/backlight/ABM/DMCU, and finally
 * programs the default clock-gating state.
 */
void dcn10_init_hw(struct dc *dc)
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);

			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;

		/* fw_info was expected to be valid here */
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);

			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */
void dcn10_power_down_on_boot(struct dc *dc)
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;

	dc_get_edp_links(dc, edp_links, &edp_num);
		edp_link = edp_links[0];

	/* Prefer powering down via the eDP path when its DIG is active. */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwss.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
		/* Otherwise, power down if any link's DIG is enabled. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwss.power_down) {
				dc->hwss.power_down(dc);

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
/*
 * dcn10_reset_hw_ctx_wrap - reset back ends for pipes whose stream went
 * away or that need reprogramming in the new context.  Iterates in reverse
 * so parent (top) pipes are reset last; skips secondary (bottom) pipes.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc_state *context)
	struct dce_hwseq *hws = dc->hwseq;

	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Nothing was running on this pipe. */
		if (!pipe_ctx_old->stream)

		/* Only top pipes own the back end. */
		if (pipe_ctx_old->top_pipe)

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
				old_clk->funcs->cs_power_down(old_clk);
/*
 * patch_address_for_sbs_tb_stereo - for side-by-side / top-and-bottom
 * stereo, the secondary (bottom) split pipe must scan out the right eye:
 * save the left address in *addr and swap the right address in.  For other
 * 3D view formats, mirror the left address/meta into the right fields.
 */
static bool patch_address_for_sbs_tb_stereo(
		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	bool sec_split = pipe_ctx->top_pipe &&
			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;

	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
		(pipe_ctx->stream->timing.timing_3d_format ==
		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
		 pipe_ctx->stream->timing.timing_3d_format ==
		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
		/* Remember the original left address so the caller can restore it. */
		*addr = plane_state->address.grph_stereo.left_addr;
		plane_state->address.grph_stereo.left_addr =
				plane_state->address.grph_stereo.right_addr;

		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
			plane_state->address.grph_stereo.right_addr =
					plane_state->address.grph_stereo.left_addr;
			plane_state->address.grph_stereo.right_meta_addr =
					plane_state->address.grph_stereo.left_meta_addr;
/*
 * dcn10_update_plane_addr - program the surface flip address into HUBP,
 * applying (and afterwards undoing) the SBS/TB stereo address patch.
 */
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)

	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	/* Immediate flips take effect right away; reflect that in status. */
	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	/* Restore the left-eye address saved by the stereo patch above. */
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
/*
 * dcn10_set_input_transfer_func - program the DPP degamma (input transfer
 * function) and, when applicable, the input LUT for a plane.
 */
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
		const struct dc_plane_state *plane_state)
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;

	if (dpp_base == NULL)

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;

	/* Program the input LUT only when a non-identity gamma is supplied
	 * and the surface format supports LUT processing. */
	if (plane_state->gamma_correction &&
		!dpp_base->ctx->dc->debug.always_use_regamma
		&& !plane_state->gamma_correction->is_identity
		&& dce_use_lut(plane_state->format))
		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);

	/* No transfer function given: bypass degamma. */
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	else if (tf->type == TF_TYPE_PREDEFINED) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);

		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);

		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);

		case TRANSFER_FUNCTION_PQ:
			/* PQ has no fixed-function curve: translate to a PWL. */
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);

	} else if (tf->type == TF_TYPE_BYPASS) {
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);

		/* Arbitrary curve: translate to the degamma PWL format. */
		cm_helper_translate_curve_to_degamma_hw_format(tf,
					&dpp_base->degamma_params);
		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
					&dpp_base->degamma_params);
#define MAX_NUM_HW_POINTS 0x200

/*
 * log_tf - dump transfer-function curve points to the gamma log channels.
 */
static void log_tf(struct dc_context *ctx,
		struct dc_transfer_func *tf, uint32_t hw_points_num)
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf

	DC_LOGGER_INIT(ctx->logger);
	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* Points actually programmed into hardware. */
	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);

	/* Remaining points, logged only on the verbose channels. */
	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/*
 * dcn10_set_output_transfer_func - program the DPP regamma (output
 * transfer function) for a stream, then log the resulting curve.
 */
bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
		const struct dc_stream_state *stream)
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;

	/* Predefined sRGB maps directly to the fixed-function curve. */
	if (stream->out_transfer_func &&
	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);

	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
	 */
	else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
			stream->out_transfer_func,
			&dpp->regamma_params, false)) {
		dpp->funcs->dpp_program_regamma_pwl(
				&dpp->regamma_params, OPP_REGAMMA_USER);
		/* Translation failed or no TF: bypass regamma. */
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);

	if (stream != NULL && stream->ctx != NULL &&
			stream->out_transfer_func != NULL) {
			stream->out_transfer_func,
			dpp->regamma_params.hw_points_num);
/*
 * dcn10_pipe_control_lock - (un)lock register double-buffering updates for
 * a pipe via its timing generator's master update lock.
 */
void dcn10_pipe_control_lock(
		struct pipe_ctx *pipe,
	struct dce_hwseq *hws = dc->hwseq;

	/* use TG master update lock to lock everything on the TG
	 * therefore only top pipe need to lock
	 */
	if (!pipe || pipe->top_pipe)

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
/**
 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
 *
 * Software keepout workaround to prevent cursor update locking from stalling
 * out cursor updates indefinitely or from old values from being retained in
 * the case where the viewport changes in the same frame as the cursor.
 *
 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
 * too close to VUPDATE, then stall out until VUPDATE finishes.
 *
 * TODO: Optimize cursor programming to be once per frame before VUPDATE
 * to avoid the need for this workaround.
 *
 * @dc: Current DC state
 * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Both hooks are required to locate VUPDATE and the current VPOS. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;

	/* Calculate time until VUPDATE in microseconds. */
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
/*
 * dcn10_cursor_lock - (un)lock cursor updates for a pipe's MPCC tree,
 * using the DMUB hw lock manager when the link supports it, otherwise the
 * MPC cursor lock.
 */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
	if (!pipe || pipe->top_pipe)

	/* Prevent cursor lock from stalling out cursor updates. */
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,

		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
/*
 * wait_for_reset_trigger_to_occur - poll the TG for a triggered reset,
 * waiting at most a fixed number of frames before giving up.
 */
static bool wait_for_reset_trigger_to_occur(
	struct dc_context *dc_ctx,
	struct timing_generator *tg)

	/* To avoid endless loop we wait at most
	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
	const uint32_t frames_to_wait_on_triggered_reset = 10;

	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {

		/* A stalled counter means a frame will never complete. */
		if (!tg->funcs->is_counter_moving(tg)) {
			DC_ERROR("TG counter is not moving!\n");

		if (tg->funcs->did_triggered_reset_occur(tg)) {
			/* usually occurs at i=1 */
			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",

		/* Wait for one frame. */
		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);

	DC_ERROR("GSL: Timeout on reset trigger!\n");
/*
 * reduceSizeAndFraction - reduce a numerator/denominator pair by repeatedly
 * dividing out small primes.  When checkUint32Bounary is set, stop (and
 * report success) as soon as both values fit in 32 bits.
 */
static uint64_t reduceSizeAndFraction(uint64_t *numerator,
		uint64_t *denominator,
		bool checkUint32Bounary)
	/* With no 32-bit bound requested, the reduction always "succeeds". */
	bool ret = checkUint32Bounary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;
		/* Both already within 32 bits: reduction goal met. */
		if (checkUint32Bounary &&
			num <= max_int32 && denom <= max_int32) {

			/* Divide out prime_numbers[i] while it divides both. */
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				denom = denom_result;
		} while (num_remainder == 0 && denom_remainder == 0);

	*denominator = denom;
2073 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2075 uint32_t master_pipe_refresh_rate =
2076 pipe->stream->timing.pix_clk_100hz * 100 /
2077 pipe->stream->timing.h_total /
2078 pipe->stream->timing.v_total;
2079 return master_pipe_refresh_rate <= 30;
/*
 * get_clock_divider - compute the pixel-clock divider for a pipe, taking
 * into account low refresh rate (when requested), YCbCr 4:2:0 encoding,
 * and the number of ODM-combined pipes in the chain.
 */
static uint8_t get_clock_divider(struct pipe_ctx *pipe,
		bool account_low_refresh_rate)
	uint32_t clock_divider = 1;
	uint32_t numpipes = 1;

	if (account_low_refresh_rate && is_low_refresh_rate(pipe))

	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)

	/* Count this pipe plus every ODM pipe chained behind it. */
	while (pipe->next_odm_pipe) {
		pipe = pipe->next_odm_pipe;

	clock_divider *= numpipes;

	return clock_divider;
/*
 * dcn10_align_pixel_clocks - align the DP DTO phase/modulo of a group of
 * synchronized pipes to the embedded (eDP/LVDS-class) panel's pixel clock,
 * so their vblanks can be kept in lockstep.
 *
 * Reads back each pipe's hardware timing and pixel clock, computes a DTO
 * phase/modulo pair per pipe (reduced via reduceSizeAndFraction), and
 * programs it through the DP clock source override.
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)

	/* Embedded panel timing comes packed in vblank_alignment_dto_params:
	 * bits [62:48] v_total, [46:32] h_total, [31:0] pixel clock (100 Hz).
	 */
	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {

			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;

			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		for (i = 0; i < group_size; i++) {
			/* Use the timing the hardware is actually running, not
			 * the requested timing. */
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded pipe sets the reference DTO. */
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
				/* Non-embedded pipes scale by their own frame
				 * size relative to the reference. */
				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;

		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				/* Reflect the overridden clock in the stream timing. */
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);

	kfree(hw_crtc_timing);
/*
 * Synchronize the vblanks of a group of pipes.
 *
 * Sequence:
 *  1. Temporarily stretch each slave OTG's DPG (display pattern generator)
 *     vertical dimension to 2*height+1 so the alignment step has headroom.
 *  2. Clear per-stream sync flags, then align DP DTO pixel clocks
 *     (dcn10_align_pixel_clocks) and pick the master pipe.
 *  3. Call align_vblanks() on every synchronizable slave against the master,
 *     passing both pixel clocks and clock dividers, and mark streams as
 *     vblank_synchronized.
 *  4. Restore the original DPG dimensions.
 * Disabled OTGs are skipped (sync on a disabled timing generator is a no-op).
 */
2191 void dcn10_enable_vblanks_synchronization(
2195 struct pipe_ctx *grouped_pipes[])
2197 struct dc_context *dc_ctx = dc->ctx;
2198 struct output_pixel_processor *opp;
2199 struct timing_generator *tg;
2200 int i, width, height, master;
2202 for (i = 1; i < group_size; i++) {
2203 opp = grouped_pipes[i]->stream_res.opp;
2204 tg = grouped_pipes[i]->stream_res.tg;
2205 tg->funcs->get_otg_active_size(tg, &width, &height);
2207 if (!tg->funcs->is_tg_enabled(tg)) {
2208 DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
/* Temporarily enlarge DPG height; restored after sync completes below. */
2212 if (opp->funcs->opp_program_dpg_dimensions)
2213 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2216 for (i = 0; i < group_size; i++) {
2217 if (grouped_pipes[i]->stream == NULL)
2219 grouped_pipes[i]->stream->vblank_synchronized = false;
2220 grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2223 DC_SYNC_INFO("Aligning DP DTOs\n");
2225 master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2227 DC_SYNC_INFO("Synchronizing VBlanks\n");
2230 for (i = 0; i < group_size; i++) {
2231 if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2232 grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2233 grouped_pipes[master]->stream_res.tg,
2234 grouped_pipes[i]->stream_res.tg,
2235 grouped_pipes[master]->stream->timing.pix_clk_100hz,
2236 grouped_pipes[i]->stream->timing.pix_clk_100hz,
2237 get_clock_divider(grouped_pipes[master], false),
2238 get_clock_divider(grouped_pipes[i], false));
2239 grouped_pipes[i]->stream->vblank_synchronized = true;
2241 grouped_pipes[master]->stream->vblank_synchronized = true;
2242 DC_SYNC_INFO("Sync complete\n");
/* Restore each slave's DPG to the real OTG active size. */
2245 for (i = 1; i < group_size; i++) {
2246 opp = grouped_pipes[i]->stream_res.opp;
2247 tg = grouped_pipes[i]->stream_res.tg;
2248 tg->funcs->get_otg_active_size(tg, &width, &height);
2249 if (opp->funcs->opp_program_dpg_dimensions)
2250 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
/*
 * Synchronize the timings of a group of pipes via the OTG reset trigger:
 * slave OTGs (index >= 1) are armed to reset against grouped_pipes[0]'s
 * timing generator, one pipe is polled for the reset to occur, then the
 * triggers are disarmed. DPG dimensions are stretched to 2*height+1 for
 * the duration (same workaround as vblank synchronization) and restored
 * at the end. SubVP phantom pipes and disabled OTGs are skipped throughout.
 */
2254 void dcn10_enable_timing_synchronization(
2258 struct pipe_ctx *grouped_pipes[])
2260 struct dc_context *dc_ctx = dc->ctx;
2261 struct output_pixel_processor *opp;
2262 struct timing_generator *tg;
2263 int i, width, height;
2265 DC_SYNC_INFO("Setting up OTG reset trigger\n");
2267 for (i = 1; i < group_size; i++) {
/* Phantom (SubVP) pipes never participate in timing sync. */
2268 if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2271 opp = grouped_pipes[i]->stream_res.opp;
2272 tg = grouped_pipes[i]->stream_res.tg;
2273 tg->funcs->get_otg_active_size(tg, &width, &height);
2275 if (!tg->funcs->is_tg_enabled(tg)) {
2276 DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
/* Temporarily enlarge DPG height; restored after sync completes below. */
2280 if (opp->funcs->opp_program_dpg_dimensions)
2281 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2284 for (i = 0; i < group_size; i++) {
2285 if (grouped_pipes[i]->stream == NULL)
2288 if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2291 grouped_pipes[i]->stream->vblank_synchronized = false;
/* Arm every slave OTG to reset against the group leader's OTG. */
2294 for (i = 1; i < group_size; i++) {
2295 if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2298 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2299 grouped_pipes[i]->stream_res.tg,
2300 grouped_pipes[0]->stream_res.tg->inst);
2303 DC_SYNC_INFO("Waiting for trigger\n");
2305 /* Need to get only check 1 pipe for having reset as all the others are
2306 * synchronized. Look at last pipe programmed to reset.
2309 if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
2310 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2312 for (i = 1; i < group_size; i++) {
2313 if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2316 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2317 grouped_pipes[i]->stream_res.tg);
/* Restore each slave's DPG to the real OTG active size. */
2320 for (i = 1; i < group_size; i++) {
2321 if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2324 opp = grouped_pipes[i]->stream_res.opp;
2325 tg = grouped_pipes[i]->stream_res.tg;
2326 tg->funcs->get_otg_active_size(tg, &width, &height);
2327 if (opp->funcs->opp_program_dpg_dimensions)
2328 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2331 DC_SYNC_INFO("Sync complete\n");
/*
 * Arm a per-frame CRTC position reset on every pipe of the group (when
 * the timing generator supports enable_crtc_reset), then block until
 * each pipe's reset trigger has actually fired. Used for multi-display
 * synchronization.
 */
2334 void dcn10_enable_per_frame_crtc_position_reset(
2337 struct pipe_ctx *grouped_pipes[])
2339 struct dc_context *dc_ctx = dc->ctx;
2342 DC_SYNC_INFO("Setting up\n");
2343 for (i = 0; i < group_size; i++)
2344 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2345 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2346 grouped_pipes[i]->stream_res.tg,
2348 &grouped_pipes[i]->stream->triggered_crtc_reset);
2350 DC_SYNC_INFO("Waiting for trigger\n");
2352 for (i = 0; i < group_size; i++)
2353 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2355 DC_SYNC_INFO("Multi-display sync is complete\n");
/*
 * Read the MC VM system-aperture registers and convert them into the
 * byte addresses HUBP programming expects:
 *  - default physical page number is a page number, shifted <<12 to bytes;
 *  - low/high logical aperture bounds are shifted <<18
 *    (register granularity — TODO confirm units against register spec).
 */
2358 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2359 struct vm_system_aperture_param *apt,
2360 struct dce_hwseq *hws)
2362 PHYSICAL_ADDRESS_LOC physical_page_number;
2363 uint32_t logical_addr_low;
2364 uint32_t logical_addr_high;
2366 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2367 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2368 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2369 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2371 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2372 LOGICAL_ADDR, &logical_addr_low);
2374 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2375 LOGICAL_ADDR, &logical_addr_high);
2377 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
2378 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
2379 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
2382 /* Temporary read settings, future will get values from kmd directly */
/*
 * Read the VM context 0 page-table registers (PTE base/start/end and the
 * protection-fault default address) into *vm0, then rebase the PTE base
 * address from UMA space into the space DCN expects by adding
 * DCHUBBUB_SDPIF_FB_BASE and subtracting DCHUBBUB_SDPIF_FB_OFFSET
 * (both registers hold values at <<24 byte granularity).
 */
2383 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2384 struct vm_context0_param *vm0,
2385 struct dce_hwseq *hws)
2387 PHYSICAL_ADDRESS_LOC fb_base;
2388 PHYSICAL_ADDRESS_LOC fb_offset;
2389 uint32_t fb_base_value;
2390 uint32_t fb_offset_value;
2392 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2393 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2395 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2396 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2397 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2398 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2400 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2401 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2402 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2403 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2405 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2406 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2407 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2408 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2410 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2411 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2412 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2413 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2416 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2417 * Therefore we need to do
2418 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2419 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2421 fb_base.quad_part = (uint64_t)fb_base_value << 24;
2422 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2423 vm0->pte_base.quad_part += fb_base.quad_part;
2424 vm0->pte_base.quad_part -= fb_offset.quad_part;
/*
 * Program a HUBP's virtual-memory settings: snapshot the current system
 * aperture and VM context 0 register state, then push both into the HUBP
 * via its function table.
 */
2428 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2430 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2431 struct vm_system_aperture_param apt = {0};
2432 struct vm_context0_param vm0 = {0};
2434 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2435 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2437 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2438 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
/*
 * Bring a plane's pipe resources up: undo the DEGVIDCN10_253 workaround,
 * power on the HUBP's plane resources, enable its DCFCLK, enable the OPP
 * pipe clock, program VM PTE settings when GPU VM is in use, and finally
 * enable the flip interrupt for a top pipe whose plane requests it.
 * Sanity checks (pstate-change verification) bracket the sequence when
 * dc->debug.sanity_checks is set.
 */
2441 static void dcn10_enable_plane(
2443 struct pipe_ctx *pipe_ctx,
2444 struct dc_state *context)
2446 struct dce_hwseq *hws = dc->hwseq;
2448 if (dc->debug.sanity_checks) {
2449 hws->funcs.verify_allow_pstate_change_high(dc);
2452 undo_DEGVIDCN10_253_wa(dc);
2454 power_on_plane_resources(dc->hwseq,
2455 pipe_ctx->plane_res.hubp->inst);
2457 /* enable DCFCLK current DCHUB */
2458 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2460 /* make sure OPP_PIPE_CLOCK_EN = 1 */
2461 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2462 pipe_ctx->stream_res.opp,
2465 if (dc->config.gpu_vm_support)
2466 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2468 if (dc->debug.sanity_checks) {
2469 hws->funcs.verify_allow_pstate_change_high(dc);
/* Flip interrupts are only armed on top pipes (no pipe above). */
2472 if (!pipe_ctx->top_pipe
2473 && pipe_ctx->plane_state
2474 && pipe_ctx->plane_state->flip_int_enabled
2475 && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2476 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
/*
 * Program the DPP gamut-remap (color-temperature) matrix for a pipe.
 * Precedence: the stream's remap matrix wins over the plane's; when
 * neither is enabled the adjustment stays in BYPASS mode.
 */
2480 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2483 struct dpp_grph_csc_adjustment adjust;
2484 memset(&adjust, 0, sizeof(adjust));
2485 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2488 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2489 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2490 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2491 adjust.temperature_matrix[i] =
2492 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2493 } else if (pipe_ctx->plane_state &&
2494 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2495 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2496 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2497 adjust.temperature_matrix[i] =
2498 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2501 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
/*
 * Decide whether the rear-plane MPO brightness fix is needed: true when
 * this pipe holds a rear plane (layer_index > 0) in an RGB colorspace and
 * the topmost pipe in the MPO chain carries a visible front plane
 * (layer_index == 0). See dcn10_program_output_csc for why the fix exists.
 */
2505 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2507 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2508 if (pipe_ctx->top_pipe) {
2509 struct pipe_ctx *top = pipe_ctx->top_pipe;
2511 while (top->top_pipe)
2512 top = top->top_pipe; // Traverse to top pipe_ctx
2513 if (top->plane_state && top->plane_state->layer_index == 0)
2514 return true; // Front MPO plane not hidden
/*
 * Apply the output-CSC matrix with the RGB bias temporarily overridden
 * (the original bias from matrix[3] is restored to entries 3/7/11 after
 * programming, so the caller's matrix is left unmodified).
 * NOTE(review): the lines that set the override value before the
 * dpp_set_csc_adjustment call are elided in this view — presumably they
 * zero matrix[3/7/11]; confirm against the full source.
 */
2520 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2522 // Override rear plane RGB bias to fix MPO brightness
2523 uint16_t rgb_bias = matrix[3];
2528 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2529 matrix[3] = rgb_bias;
2530 matrix[7] = rgb_bias;
2531 matrix[11] = rgb_bias;
/*
 * Program the output color-space conversion for a pipe. When the stream
 * supplies an adjustment matrix, program it (applying the rear-plane MPO
 * brightness fix when a positive RGB bias would otherwise be double-added
 * by MPC blending on DCN1); otherwise fall back to the default CSC for
 * the given colorspace.
 */
2534 void dcn10_program_output_csc(struct dc *dc,
2535 struct pipe_ctx *pipe_ctx,
2536 enum dc_color_space colorspace,
2540 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2541 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2543 /* MPO is broken with RGB colorspaces when OCSC matrix
2544 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2545 * Blending adds offsets from front + rear to rear plane
2547 * Fix is to set RGB bias to 0 on rear plane, top plane
2548 * black value pixels add offset instead of rear + front
2551 int16_t rgb_bias = matrix[3];
2552 // matrix[3/7/11] are all the same offset value
2554 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2555 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2557 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2561 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2562 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
/*
 * Reprogram a DPP for a plane: set up the input CSC (format, zero
 * expansion mode, input CSC matrix, color space), then compute and
 * program the prescale bias-and-scale parameters when the hardware
 * supports it.
 */
2566 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2568 struct dc_bias_and_scale bns_params = {0};
2570 // program the input csc
2571 dpp->funcs->dpp_setup(dpp,
2572 plane_state->format,
2573 EXPANSION_MODE_ZERO,
2574 plane_state->input_csc_color_matrix,
2575 plane_state->color_space,
2578 //set scale and bias registers
2579 build_prescale_params(&bns_params, plane_state);
2580 if (dpp->funcs->dpp_program_bias_and_scale)
2581 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
/*
 * Push the pipe's visual-confirm color into the MPCC background color
 * for debugging, and mirror it into the plane state.
 * NOTE(review): plane_state is dereferenced without a NULL check here —
 * callers presumably guarantee a plane is attached; verify at call sites.
 */
2584 void dcn10_update_visual_confirm_color(struct dc *dc,
2585 struct pipe_ctx *pipe_ctx,
2588 struct mpc *mpc = dc->res_pool->mpc;
2590 if (mpc->funcs->set_bg_color) {
2591 memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
2592 mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
/*
 * (Re)program the MPCC blending configuration for a pipe's plane.
 *
 * Builds an mpcc_blnd_cfg from the plane state: per-pixel alpha (with
 * optional pre-multiplied alpha — only for RGB output color spaces, since
 * DCN1's output CM sits before the MPC) and/or global alpha. The MPCC id
 * is tied to the HUBP instance (see the init_hw note below). For a
 * non-full update only the blend config is refreshed; a full update
 * removes any MPCC currently bound to this id and re-inserts the plane
 * into the MPC tree.
 */
2596 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2598 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2599 struct mpcc_blnd_cfg blnd_cfg = {0};
2600 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2602 struct mpcc *new_mpcc;
2603 struct mpc *mpc = dc->res_pool->mpc;
2604 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2606 blnd_cfg.overlap_only = false;
2607 blnd_cfg.global_gain = 0xff;
2609 if (per_pixel_alpha) {
2610 /* DCN1.0 has output CM before MPC which seems to screw with
2611 * pre-multiplied alpha.
2613 blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
2614 pipe_ctx->stream->output_color_space)
2615 && pipe_ctx->plane_state->pre_multiplied_alpha);
2616 if (pipe_ctx->plane_state->global_alpha) {
2617 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
2618 blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
2620 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2623 blnd_cfg.pre_multiplied_alpha = false;
2624 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2627 if (pipe_ctx->plane_state->global_alpha)
2628 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2630 blnd_cfg.global_alpha = 0xff;
2634 * Note: currently there is a bug in init_hw such that
2635 * on resume from hibernate, BIOS sets up MPCC0, and
2636 * we do mpcc_remove but the mpcc cannot go to idle
2637 * after remove. This cause us to pick mpcc1 here,
2638 * which causes a pstate hang for yet unknown reason.
2640 mpcc_id = hubp->inst;
2642 /* If there is no full update, don't need to touch MPC tree*/
2643 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2644 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2645 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
2649 /* check if this MPCC is already being used */
2650 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2651 /* remove MPCC if being used */
2652 if (new_mpcc != NULL)
2653 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2655 if (dc->debug.sanity_checks)
2656 mpc->funcs->assert_mpcc_idle_before_connect(
2657 dc->res_pool->mpc, mpcc_id);
2659 /* Call MPC to insert new plane */
2660 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2667 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
2669 ASSERT(new_mpcc != NULL);
/* Record which OPP and MPCC this HUBP is now routed through. */
2670 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2671 hubp->mpcc_id = mpcc_id;
/*
 * Program the DPP scaler from the pipe's precomputed scl_data, enabling
 * line-buffer alpha only when the plane uses per-pixel alpha and has a
 * pipe blended below it. LB depth is fixed at 36bpp here.
 */
2674 static void update_scaler(struct pipe_ctx *pipe_ctx)
2676 bool per_pixel_alpha =
2677 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2679 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2680 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2681 /* scaler configuration */
2682 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2683 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
/*
 * Core per-plane programming path: update DCHUBP and DPP state for one
 * pipe according to the plane's update flags.
 *
 * On a full update this also handles DPPCLK (including the keep
 * dppclk == dispclk workaround described in the long comment below),
 * VTG selection, DLG/TTU setup, gamut remap and output CSC. Conditional
 * sections (input CSC, MPCC blending, scaler, viewport, surface config)
 * run only when their corresponding update_flags bits are set. Finally
 * the surface address is flushed and the HUBP unblanked when the pipe
 * tree is visible.
 */
2686 static void dcn10_update_dchubp_dpp(
2688 struct pipe_ctx *pipe_ctx,
2689 struct dc_state *context)
2691 struct dce_hwseq *hws = dc->hwseq;
2692 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2693 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2694 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2695 struct plane_size size = plane_state->plane_size;
2696 unsigned int compat_level = 0;
2697 bool should_divided_by_2 = false;
2699 /* depends on DML calculation, DPP clock value may change dynamically */
2700 /* If request max dpp clk is lower than current dispclk, no need to
2703 if (plane_state->update_flags.bits.full_update) {
2705 /* new calculated dispclk, dppclk are stored in
2706 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2707 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2708 * dcn10_validate_bandwidth compute new dispclk, dppclk.
2709 * dispclk will put in use after optimize_bandwidth when
2710 * ramp_up_dispclk_with_dpp is called.
2711 * there are two places for dppclk be put in use. One location
2712 * is the same as the location as dispclk. Another is within
2713 * update_dchubp_dpp which happens between pre_bandwidth and
2714 * optimize_bandwidth.
2715 * dppclk updated within update_dchubp_dpp will cause new
2716 * clock values of dispclk and dppclk not be in use at the same
2717 * time. when clocks are decreased, this may cause dppclk is
2718 * lower than previous configuration and let pipe stuck.
2719 * for example, eDP + external dp, change resolution of DP from
2720 * 1920x1080x144hz to 1280x960x60hz.
2721 * before change: dispclk = 337889 dppclk = 337889
2722 * change mode, dcn10_validate_bandwidth calculate
2723 * dispclk = 143122 dppclk = 143122
2724 * update_dchubp_dpp be executed before dispclk be updated,
2725 * dispclk = 337889, but dppclk use new value dispclk /2 =
2726 * 168944. this will cause pipe pstate warning issue.
2727 * solution: between pre_bandwidth and optimize_bandwidth, while
2728 * dispclk is going to be decreased, keep dppclk = dispclk
2730 if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2731 dc->clk_mgr->clks.dispclk_khz)
2732 should_divided_by_2 = false;
2734 should_divided_by_2 =
2735 context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2736 dc->clk_mgr->clks.dispclk_khz / 2;
2738 dpp->funcs->dpp_dppclk_control(
2740 should_divided_by_2,
2743 if (dc->res_pool->dccg)
2744 dc->res_pool->dccg->funcs->update_dpp_dto(
2747 pipe_ctx->plane_res.bw.dppclk_khz);
/* Keep the clock manager's view of DPPCLK in sync with what was just
 * programmed (dispclk/2 when the divider is engaged). */
2749 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2750 dc->clk_mgr->clks.dispclk_khz / 2 :
2751 dc->clk_mgr->clks.dispclk_khz;
2754 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2755 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2756 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2758 if (plane_state->update_flags.bits.full_update) {
2759 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2761 hubp->funcs->hubp_setup(
2763 &pipe_ctx->dlg_regs,
2764 &pipe_ctx->ttu_regs,
2766 &pipe_ctx->pipe_dlg_param);
2767 hubp->funcs->hubp_setup_interdependent(
2769 &pipe_ctx->dlg_regs,
2770 &pipe_ctx->ttu_regs);
/* Surface size follows the scaler viewport, not the raw plane size. */
2773 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2775 if (plane_state->update_flags.bits.full_update ||
2776 plane_state->update_flags.bits.bpp_change)
2777 dcn10_update_dpp(dpp, plane_state);
2779 if (plane_state->update_flags.bits.full_update ||
2780 plane_state->update_flags.bits.per_pixel_alpha_change ||
2781 plane_state->update_flags.bits.global_alpha_change)
2782 hws->funcs.update_mpcc(dc, pipe_ctx);
2784 if (plane_state->update_flags.bits.full_update ||
2785 plane_state->update_flags.bits.per_pixel_alpha_change ||
2786 plane_state->update_flags.bits.global_alpha_change ||
2787 plane_state->update_flags.bits.scaling_change ||
2788 plane_state->update_flags.bits.position_change) {
2789 update_scaler(pipe_ctx);
2792 if (plane_state->update_flags.bits.full_update ||
2793 plane_state->update_flags.bits.scaling_change ||
2794 plane_state->update_flags.bits.position_change) {
2795 hubp->funcs->mem_program_viewport(
2797 &pipe_ctx->plane_res.scl_data.viewport,
2798 &pipe_ctx->plane_res.scl_data.viewport_c);
/* Re-apply cursor state if a cursor surface has been set. */
2801 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2802 dc->hwss.set_cursor_position(pipe_ctx);
2803 dc->hwss.set_cursor_attribute(pipe_ctx);
2805 if (dc->hwss.set_cursor_sdr_white_level)
2806 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2809 if (plane_state->update_flags.bits.full_update) {
2811 dc->hwss.program_gamut_remap(pipe_ctx);
2813 dc->hwss.program_output_csc(dc,
2815 pipe_ctx->stream->output_color_space,
2816 pipe_ctx->stream->csc_color_matrix.matrix,
2817 pipe_ctx->stream_res.opp->inst);
2820 if (plane_state->update_flags.bits.full_update ||
2821 plane_state->update_flags.bits.pixel_format_change ||
2822 plane_state->update_flags.bits.horizontal_mirror_change ||
2823 plane_state->update_flags.bits.rotation_change ||
2824 plane_state->update_flags.bits.swizzle_change ||
2825 plane_state->update_flags.bits.dcc_change ||
2826 plane_state->update_flags.bits.bpp_change ||
2827 plane_state->update_flags.bits.scaling_change ||
2828 plane_state->update_flags.bits.plane_size_change) {
2829 hubp->funcs->hubp_program_surface_config(
2831 plane_state->format,
2832 &plane_state->tiling_info,
2834 plane_state->rotation,
2836 plane_state->horizontal_mirror,
2840 hubp->power_gated = false;
2842 hws->funcs.update_plane_addr(dc, pipe_ctx);
2844 if (is_pipe_tree_visible(pipe_ctx))
2845 hubp->funcs->set_blank(hubp, false);
/*
 * Blank or unblank a pipe's pixel data at the OTG. Programs the blank
 * color derived from the stream's output color space (duplicating the Y
 * value into the Cr channel for YCbCr 4:2:0 packing), then toggles the
 * timing generator's blank state. ABM is re-enabled on unblank and
 * immediately disabled before blanking; when blanking, the code waits
 * for VBLANK before applying blank to avoid mid-frame artifacts.
 */
2848 void dcn10_blank_pixel_data(
2850 struct pipe_ctx *pipe_ctx,
2853 enum dc_color_space color_space;
2854 struct tg_color black_color = {0};
2855 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2856 struct dc_stream_state *stream = pipe_ctx->stream;
2858 /* program otg blank color */
2859 color_space = stream->output_color_space;
2860 color_space_to_black_color(dc, color_space, &black_color);
2863 * The way 420 is packed, 2 channels carry Y component, 1 channel
2864 * alternate between Cb and Cr, so both channels need the pixel
2867 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2868 black_color.color_r_cr = black_color.color_g_y;
2871 if (stream_res->tg->funcs->set_blank_color)
2872 stream_res->tg->funcs->set_blank_color(
2877 if (stream_res->tg->funcs->set_blank)
2878 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2879 if (stream_res->abm) {
2880 dc->hwss.set_pipe(pipe_ctx);
2881 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2884 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2885 if (stream_res->tg->funcs->set_blank) {
2886 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2887 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
/*
 * Program the DPP HDR multiplier from the plane's fixed-point hdr_mult.
 * The value is converted to the hardware's custom float format
 * (6 exponent bits, 12 mantissa bits); a zero multiplier leaves the
 * default of 1.0 (0x1f000 in that format) in place.
 */
2892 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2894 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2895 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2896 struct custom_float_format fmt;
2898 fmt.exponenta_bits = 6;
2899 fmt.mantissa_bits = 12;
2903 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2904 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2906 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2907 pipe_ctx->plane_res.dpp, hw_mult);
/*
 * Program one pipe for the given state. For top pipes this sets up the
 * OTG global sync (vready/vstartup/vupdate), VTG params, the vupdate
 * interrupt, and blanks/unblanks based on pipe-tree visibility. Then the
 * plane path runs: enable the plane on full update, program DCHUBP/DPP,
 * HDR multiplier, and input transfer function (degamma) when gamma or
 * transfer-function flags changed. The output transfer function (regamma)
 * is only programmed on full update because its HW-format translation is
 * expensive (~750 us, per the comment below).
 */
2910 void dcn10_program_pipe(
2912 struct pipe_ctx *pipe_ctx,
2913 struct dc_state *context)
2915 struct dce_hwseq *hws = dc->hwseq;
2917 if (pipe_ctx->top_pipe == NULL) {
2918 bool blank = !is_pipe_tree_visible(pipe_ctx);
2920 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2921 pipe_ctx->stream_res.tg,
2922 calculate_vready_offset_for_group(pipe_ctx),
2923 pipe_ctx->pipe_dlg_param.vstartup_start,
2924 pipe_ctx->pipe_dlg_param.vupdate_offset,
2925 pipe_ctx->pipe_dlg_param.vupdate_width);
2927 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2928 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2930 if (hws->funcs.setup_vupdate_interrupt)
2931 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2933 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2936 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2937 dcn10_enable_plane(dc, pipe_ctx, context);
2939 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2941 hws->funcs.set_hdr_multiplier(pipe_ctx);
2943 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2944 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2945 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2946 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2948 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2949 * only do gamma programming for full update.
2950 * TODO: This can be further optimized/cleaned up
2951 * Always call this for now since it does memcmp inside before
2952 * doing heavy calculation and programming
2954 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2955 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream)
/*
 * For every enabled top pipe with a stream and plane, wait through one
 * VBLANK-then-VACTIVE transition so the pending OTG update is guaranteed
 * to have latched at VUPDATE before the caller locks the pipes again
 * (see the inline comment for why OTG_UPDATE_PENDING alone is not
 * sufficient).
 */
2958 void dcn10_wait_for_pending_cleared(struct dc *dc,
2959 struct dc_state *context)
2961 struct pipe_ctx *pipe_ctx;
2962 struct timing_generator *tg;
2965 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2966 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2967 tg = pipe_ctx->stream_res.tg;
2970 * Only wait for top pipe's tg penindg bit
2971 * Also skip if pipe is disabled.
2973 if (pipe_ctx->top_pipe ||
2974 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2975 !tg->funcs->is_tg_enabled(tg))
2979 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2980 * For some reason waiting for OTG_UPDATE_PENDING cleared
2981 * seems to not trigger the update right away, and if we
2982 * lock again before VUPDATE then we don't get a separated
2985 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2986 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
/*
 * Front-end programming steps that must run after pipe locks are
 * released: apply the false-OTG-underflow workaround on top pipes that
 * ended up with zero planes, disable planes flagged for disable in the
 * new context (then optimize bandwidth once any disable occurred), and
 * finally apply the DEGVIDCN10_254 hubbub watermark-change workaround
 * when it is active for this ASIC.
 */
2990 void dcn10_post_unlock_program_front_end(
2992 struct dc_state *context)
2996 DC_LOGGER_INIT(dc->ctx->logger);
2998 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2999 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3001 if (!pipe_ctx->top_pipe &&
3002 !pipe_ctx->prev_odm_pipe &&
3004 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3006 if (context->stream_status[i].plane_count == 0)
3007 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
/* Note: disable_plane is called on dc->current_state's pipe, while the
 * disable flag is read from the new context at the same index. */
3011 for (i = 0; i < dc->res_pool->pipe_count; i++)
3012 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3013 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
3015 for (i = 0; i < dc->res_pool->pipe_count; i++)
3016 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3017 dc->hwss.optimize_bandwidth(dc, context);
3021 if (dc->hwseq->wa.DEGVIDCN10_254)
3022 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
/*
 * Workaround for HW frame-packed stereo: when any stream in the context
 * uses TIMING_3D_FORMAT_HW_FRAME_PACKING, disable hubbub self-refresh
 * control. (The re-enable path for the non-stereo case is elided in this
 * view — TODO confirm against the full source.)
 */
3025 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3029 for (i = 0; i < context->stream_count; i++) {
3030 if (context->streams[i]->timing.timing_3d_format
3031 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3035 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
/*
 * Raise clocks/watermarks ahead of a mode/surface change: update clocks
 * via the clock manager (phyclk forced to 0 when there are no streams),
 * program hubbub watermarks (recording whether a later optimization pass
 * is required in dc->wm_optimized_required), apply the stereo frame-pack
 * workaround, and — in WM_REPORT_OVERRIDE mode — notify pplib of the
 * minimum fclk/dcfclk/socclk watermark ranges. Sanity checks bracket the
 * sequence when enabled. Mirrors dcn10_optimize_bandwidth, which runs
 * after the change to lower clocks again.
 */
3041 void dcn10_prepare_bandwidth(
3043 struct dc_state *context)
3045 struct dce_hwseq *hws = dc->hwseq;
3046 struct hubbub *hubbub = dc->res_pool->hubbub;
3047 int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3049 if (dc->debug.sanity_checks)
3050 hws->funcs.verify_allow_pstate_change_high(dc);
3052 if (context->stream_count == 0)
3053 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3055 dc->clk_mgr->funcs->update_clocks(
3060 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3061 &context->bw_ctx.bw.dcn.watermarks,
3062 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3064 dcn10_stereo_hw_frame_pack_wa(dc, context);
3066 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3069 dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3071 dcn_bw_notify_pplib_of_wm_ranges(
3072 dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3075 if (dc->debug.sanity_checks)
3076 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Lower clocks/watermarks after a mode/surface change has completed.
 * Same structure as dcn10_prepare_bandwidth (clock update, watermark
 * programming, stereo frame-pack workaround, optional pplib watermark
 * notification, sanity-check bracketing), but the program_watermarks
 * return value is discarded here since no further optimization follows.
 */
3079 void dcn10_optimize_bandwidth(
3081 struct dc_state *context)
3083 struct dce_hwseq *hws = dc->hwseq;
3084 struct hubbub *hubbub = dc->res_pool->hubbub;
3085 int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3087 if (dc->debug.sanity_checks)
3088 hws->funcs.verify_allow_pstate_change_high(dc);
3090 if (context->stream_count == 0)
3091 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3093 dc->clk_mgr->funcs->update_clocks(
3098 hubbub->funcs->program_watermarks(hubbub,
3099 &context->bw_ctx.bw.dcn.watermarks,
3100 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3103 dcn10_stereo_hw_frame_pack_wa(dc, context);
3105 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3108 dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3110 dcn_bw_notify_pplib_of_wm_ranges(
3111 dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3114 if (dc->debug.sanity_checks)
3115 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Program DRR (dynamic refresh rate) parameters on a set of pipes:
 * copy the v_total min/mid/max adjustment into drr_params and apply it
 * via each timing generator's set_drr. When a real DRR range is active
 * (both v_total_max and v_total_min nonzero), also arm static-screen
 * control with the manual OTG_TRIG_A event (0x800) and a 2-frame count.
 */
3118 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3119 int num_pipes, struct dc_crtc_timing_adjust adjust)
3122 struct drr_params params = {0};
3123 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3124 unsigned int event_triggers = 0x800;
3125 // Note DRR trigger events are generated regardless of whether num frames met.
3126 unsigned int num_frames = 2;
3128 params.vertical_total_max = adjust.v_total_max;
3129 params.vertical_total_min = adjust.v_total_min;
3130 params.vertical_total_mid = adjust.v_total_mid;
3131 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3132 /* TODO: If multiple pipes are to be supported, you need
3133 * some GSL stuff. Static screen triggers may be programmed differently
3136 for (i = 0; i < num_pipes; i++) {
3137 if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3138 if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3139 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3140 pipe_ctx[i]->stream_res.tg, &params);
3141 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3142 if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3143 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3144 pipe_ctx[i]->stream_res.tg,
3145 event_triggers, num_frames);
/*
 * dcn10_get_position - read the current CRTC scanout position from each
 * pipe's timing generator. Every iteration overwrites *position, so with
 * multiple pipes only the last pipe's position survives (see TODO).
 */
void dcn10_get_position(struct pipe_ctx **pipe_ctx,
		struct crtc_position *position)
	/* TODO: handle pipes > 1
	 */
	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
/*
 * dcn10_set_static_screen_control - translate the generic static-screen
 * trigger flags (surface update / cursor update / forced trigger) into
 * the timing generator's bit encoding and program it on every pipe,
 * together with the number of consecutive static frames required.
 */
void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
		int num_pipes, const struct dc_static_screen_params *params)
	unsigned int triggers = 0;
	/* accumulate TG trigger bits for each enabled event source */
	if (params->triggers.surface_update)
	if (params->triggers.cursor_update)
	if (params->triggers.force_trigger)
	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->
			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
					triggers, params->num_frames);
/*
 * dcn10_config_stereo_parameters - derive the CRTC stereo programming
 * flags from the stream's 3D timing format and requested view format.
 * Only frame-sequential output on a genuinely stereo timing programs
 * stereo/polarity; everything else leaves *flags at its caller-provided
 * (normally zeroed) state.
 */
static void dcn10_config_stereo_parameters(
		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
	enum view_3d_format view_format = stream->view_format;
	enum dc_timing_3d_format timing_3d_format =\
		stream->timing.timing_3d_format;
	bool non_stereo_timing = false;

	/* SBS/TAB pack both eyes into one frame, so the timing itself is 2D */
	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
		non_stereo_timing = true;

	if (non_stereo_timing == false &&
		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {

		flags->PROGRAM_STEREO = 1;
		flags->PROGRAM_POLARITY = 1;
		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {

			if (stream->link && stream->link->ddc) {
				enum display_dongle_type dongle = \
					stream->link->ddc->dongle_type;

				/* converter dongles cannot pass DP stereo sync through */
				if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
					dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
					dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
					flags->DISABLE_STEREO_DP_SYNC = 1;

		flags->RIGHT_EYE_POLARITY =\
			stream->timing.flags.RIGHT_EYE_3D_POLARITY;
		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
			flags->FRAME_PACKED = 1;
/*
 * dcn10_setup_stereo - apply stereo configuration for one pipe:
 * compute the stereo flags, drive the sideband stereo GPIO for
 * TIMING_3D_FORMAT_SIDEBAND_FA, then program the OPP and TG stereo
 * registers from the computed flags.
 */
void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
	struct crtc_stereo_flags flags = { 0 };
	struct dc_stream_state *stream = pipe_ctx->stream;

	dcn10_config_stereo_parameters(stream, &flags);

	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
		/* try to assert the stereo GPIO; on failure drive it low */
		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
		/* NOTE(review): this call appears to belong to the non-sideband
		 * path (GPIO de-assert) — confirm branch placement */
		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);

	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
		pipe_ctx->stream_res.opp,
		flags.PROGRAM_STEREO == 1,

	pipe_ctx->stream_res.tg->funcs->program_stereo(
		pipe_ctx->stream_res.tg,
3250 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3254 for (i = 0; i < res_pool->pipe_count; i++) {
3255 if (res_pool->hubps[i]->inst == mpcc_inst)
3256 return res_pool->hubps[i];
/*
 * dcn10_wait_for_mpcc_disconnect - for every MPCC instance flagged as
 * "disconnect pending" on this pipe's OPP, wait (while the TG is still
 * enabled) for the MPC to go idle, clear the pending flag, and blank the
 * corresponding HUBP. Sanity checks bracket the sequence when enabled.
 */
void dcn10_wait_for_mpcc_disconnect(
		struct resource_pool *res_pool,
		struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* nothing to do if the pipe has no OPP assigned */
	if (!pipe_ctx->stream_res.opp)

	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);

			/* only wait for idle while the timing generator is running */
			if (pipe_ctx->stream_res.tg &&
				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
			/* NOTE(review): hubp is dereferenced without a NULL check —
			 * confirm get_hubp_by_inst always resolves for pending insts */
			hubp->funcs->set_blank(hubp, true);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * dcn10_dummy_display_power_gating - stub implementation (per its name);
 * all parameters are ignored. Body not shown here — presumably reports
 * success unconditionally; TODO confirm return value.
 */
bool dcn10_dummy_display_power_gating(
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
/*
 * dcn10_update_pending_status - refresh the plane's flip/address status
 * from HUBP hardware state, track the current stereo eye for
 * GRPH_STEREO addresses, and retire the "disallow self refresh during
 * multi-plane transition" workaround once a new frame has started.
 */
void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dc *dc = pipe_ctx->stream->ctx->dc;

	if (plane_state == NULL)

	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
			pipe_ctx->plane_res.hubp);

	/* sticky-or: stay pending until explicitly cleared elsewhere */
	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;

	plane_state->status.current_address = plane_state->status.requested_address;

	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
			tg->funcs->is_stereo_left_eye) {
		plane_state->status.is_right_eye =
			!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);

	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
		struct dce_hwseq *hwseq = dc->hwseq;
		/* workaround frame tracking uses TG 0 */
		struct timing_generator *tg = dc->res_pool->timing_generators[0];
		unsigned int cur_frame = tg->funcs->get_frame_count(tg);

		/* frame advanced since the WA was applied: re-allow self refresh */
		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3342 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3344 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3346 /* In DCN, this programming sequence is owned by the hubbub */
3347 hubbub->funcs->update_dchub(hubbub, dh_data);
/*
 * dcn10_can_pipe_disable_cursor - decide whether this pipe's HW cursor
 * can be disabled because another visible plane above it fully covers
 * this pipe's recout (so the cursor would otherwise be drawn twice or
 * with wrong scaling). Returns true when such a covering plane exists.
 */
static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
	struct pipe_ctx *test_pipe, *split_pipe;
	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
	struct rect r1 = scl_data->recout, r2, r2_half;
	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
	int cur_layer = pipe_ctx->plane_state->layer_index;

	/*
	 * Disable the cursor if there's another pipe above this with a
	 * plane that contains this pipe's viewport to prevent double cursor
	 * and incorrect scaling artifacts.
	 */
	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
		test_pipe = test_pipe->top_pipe) {
		// Skip invisible layer and pipe-split plane on same layer
		if (!test_pipe->plane_state ||
			!test_pipe->plane_state->visible ||
			test_pipe->plane_state->layer_index == cur_layer)

		r2 = test_pipe->plane_res.scl_data.recout;
		r2_r = r2.x + r2.width;
		r2_b = r2.y + r2.height;
		split_pipe = test_pipe;

		/*
		 * There is another half plane on same layer because of
		 * pipe-split, merge together per same height.
		 */
		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
			split_pipe = split_pipe->top_pipe)
			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
				/* widen r2 to the union of both split halves */
				r2_half = split_pipe->plane_res.scl_data.recout;
				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
				r2.width = r2.width + r2_half.width;
				r2_r = r2.x + r2.width;

		/* covering test: r1 (this pipe's recout) fully inside r2 */
		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3397 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3399 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3400 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3401 struct dpp *dpp = pipe_ctx->plane_res.dpp;
3402 struct dc_cursor_mi_param param = {
3403 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3404 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3405 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3406 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3407 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3408 .rotation = pipe_ctx->plane_state->rotation,
3409 .mirror = pipe_ctx->plane_state->horizontal_mirror
3411 bool pipe_split_on = false;
3412 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3413 (pipe_ctx->prev_odm_pipe != NULL);
3415 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3416 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3417 int x_pos = pos_cpy.x;
3418 int y_pos = pos_cpy.y;
3420 if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3421 if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3422 (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3423 pipe_split_on = true;
3428 * DC cursor is stream space, HW cursor is plane space and drawn
3429 * as part of the framebuffer.
3431 * Cursor position can't be negative, but hotspot can be used to
3432 * shift cursor out of the plane bounds. Hotspot must be smaller
3433 * than the cursor size.
3437 * Translate cursor from stream space to plane space.
3439 * If the cursor is scaled then we need to scale the position
3440 * to be in the approximately correct place. We can't do anything
3441 * about the actual size being incorrect, that's a limitation of
3444 if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3445 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3446 pipe_ctx->plane_state->dst_rect.width;
3447 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3448 pipe_ctx->plane_state->dst_rect.height;
3450 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3451 pipe_ctx->plane_state->dst_rect.width;
3452 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3453 pipe_ctx->plane_state->dst_rect.height;
3457 * If the cursor's source viewport is clipped then we need to
3458 * translate the cursor to appear in the correct position on
3461 * This translation isn't affected by scaling so it needs to be
3462 * done *after* we adjust the position for the scale factor.
3464 * This is only done by opt-in for now since there are still
3465 * some usecases like tiled display that might enable the
3466 * cursor on both streams while expecting dc to clip it.
3468 if (pos_cpy.translate_by_source) {
3469 x_pos += pipe_ctx->plane_state->src_rect.x;
3470 y_pos += pipe_ctx->plane_state->src_rect.y;
3474 * If the position is negative then we need to add to the hotspot
3475 * to shift the cursor outside the plane.
3479 pos_cpy.x_hotspot -= x_pos;
3484 pos_cpy.y_hotspot -= y_pos;
3488 pos_cpy.x = (uint32_t)x_pos;
3489 pos_cpy.y = (uint32_t)y_pos;
3491 if (pipe_ctx->plane_state->address.type
3492 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3493 pos_cpy.enable = false;
3495 if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3496 pos_cpy.enable = false;
3499 if (param.rotation == ROTATION_ANGLE_0) {
3500 int viewport_width =
3501 pipe_ctx->plane_res.scl_data.viewport.width;
3503 pipe_ctx->plane_res.scl_data.viewport.x;
3506 if (pipe_split_on || odm_combine_on) {
3507 if (pos_cpy.x >= viewport_width + viewport_x) {
3508 pos_cpy.x = 2 * viewport_width
3509 - pos_cpy.x + 2 * viewport_x;
3511 uint32_t temp_x = pos_cpy.x;
3513 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3514 if (temp_x >= viewport_x +
3515 (int)hubp->curs_attr.width || pos_cpy.x
3516 <= (int)hubp->curs_attr.width +
3517 pipe_ctx->plane_state->src_rect.x) {
3518 pos_cpy.x = temp_x + viewport_width;
3522 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3526 // Swap axis and mirror horizontally
3527 else if (param.rotation == ROTATION_ANGLE_90) {
3528 uint32_t temp_x = pos_cpy.x;
3530 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3531 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3534 // Swap axis and mirror vertically
3535 else if (param.rotation == ROTATION_ANGLE_270) {
3536 uint32_t temp_y = pos_cpy.y;
3537 int viewport_height =
3538 pipe_ctx->plane_res.scl_data.viewport.height;
3540 pipe_ctx->plane_res.scl_data.viewport.y;
3543 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3544 * For pipe split cases:
3545 * - apply offset of viewport.y to normalize pos_cpy.x
3546 * - calculate the pos_cpy.y as before
3547 * - shift pos_cpy.y back by same offset to get final value
3548 * - since we iterate through both pipes, use the lower
3549 * viewport.y for offset
3550 * For non pipe split cases, use the same calculation for
3551 * pos_cpy.y as the 180 degree rotation case below,
3552 * but use pos_cpy.x as our input because we are rotating
3555 if (pipe_split_on || odm_combine_on) {
3556 int pos_cpy_x_offset;
3557 int other_pipe_viewport_y;
3559 if (pipe_split_on) {
3560 if (pipe_ctx->bottom_pipe) {
3561 other_pipe_viewport_y =
3562 pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3564 other_pipe_viewport_y =
3565 pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3568 if (pipe_ctx->next_odm_pipe) {
3569 other_pipe_viewport_y =
3570 pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3572 other_pipe_viewport_y =
3573 pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3576 pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3577 other_pipe_viewport_y : viewport_y;
3578 pos_cpy.x -= pos_cpy_x_offset;
3579 if (pos_cpy.x > viewport_height) {
3580 pos_cpy.x = pos_cpy.x - viewport_height;
3581 pos_cpy.y = viewport_height - pos_cpy.x;
3583 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3585 pos_cpy.y += pos_cpy_x_offset;
3587 pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3591 // Mirror horizontally and vertically
3592 else if (param.rotation == ROTATION_ANGLE_180) {
3593 int viewport_width =
3594 pipe_ctx->plane_res.scl_data.viewport.width;
3596 pipe_ctx->plane_res.scl_data.viewport.x;
3598 if (!param.mirror) {
3599 if (pipe_split_on || odm_combine_on) {
3600 if (pos_cpy.x >= viewport_width + viewport_x) {
3601 pos_cpy.x = 2 * viewport_width
3602 - pos_cpy.x + 2 * viewport_x;
3604 uint32_t temp_x = pos_cpy.x;
3606 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3607 if (temp_x >= viewport_x +
3608 (int)hubp->curs_attr.width || pos_cpy.x
3609 <= (int)hubp->curs_attr.width +
3610 pipe_ctx->plane_state->src_rect.x) {
3611 pos_cpy.x = 2 * viewport_width - temp_x;
3615 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3620 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3622 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3623 * pos_cpy.y_new = viewport.y + delta_from_bottom
3625 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3627 pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3628 pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3631 hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
3632 dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
3635 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3637 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3639 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3640 pipe_ctx->plane_res.hubp, attributes);
3641 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3642 pipe_ctx->plane_res.dpp, attributes);
/*
 * dcn10_set_cursor_sdr_white_level - scale the cursor brightness when
 * the stream's SDR white level exceeds the 80-nit reference, converting
 * the (white_level / 80) ratio into the DPP's FP16 scale format.
 * No-op if the DPP lacks set_optional_cursor_attributes.
 */
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
	struct fixed31_32 multiplier;
	struct dpp_cursor_attributes opt_attr = { 0 };
	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier (FP16 encoding of 1.0)
	struct custom_float_format fmt;

	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)

	/* half-precision float: 5 exponent bits, 10 mantissa bits */
	fmt.exponenta_bits = 5;
	fmt.mantissa_bits = 10;

	if (sdr_white_level > 80) {
		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);

	opt_attr.scale = hw_scale;

	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
			pipe_ctx->plane_res.dpp, &opt_attr);
3673 * apply_front_porch_workaround TODO FPGA still need?
3675 * This is a workaround for a bug that has existed since R5xx and has not been
3676 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3678 static void apply_front_porch_workaround(
3679 struct dc_crtc_timing *timing)
3681 if (timing->flags.INTERLACE == 1) {
3682 if (timing->v_front_porch < 2)
3683 timing->v_front_porch = 2;
3685 if (timing->v_front_porch < 1)
3686 timing->v_front_porch = 1;
/*
 * dcn10_get_vupdate_offset_from_vsync - compute the line offset (relative
 * to vsync) at which the VUPDATE event fires, derived from the blank-end
 * position and the pipe's vstartup parameter. Works on a patched copy of
 * the stream timing so the front-porch workaround does not mutate it.
 * May return a negative offset.
 */
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	struct dc_crtc_timing patched_crtc_timing;
	int vesa_sync_start;
	int interlace_factor;

	patched_crtc_timing = *dc_crtc_timing;
	apply_front_porch_workaround(&patched_crtc_timing);

	/* interlaced timings count field lines, hence the factor of 2 */
	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;

	vesa_sync_start = patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom +
			patched_crtc_timing.v_front_porch;

	asic_blank_end = (patched_crtc_timing.v_total -
			patched_crtc_timing.v_border_top)

	return asic_blank_end -
			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
/*
 * dcn10_calc_vupdate_position - convert the (possibly negative) vupdate
 * offset into a start/end line pair wrapped into [0, v_total); the end
 * line trails the start by 2 lines, modulo the frame length.
 */
void dcn10_calc_vupdate_position(
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	/* wrap the offset into the current frame */
	if (vupdate_pos >= 0)
		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
	*end_line = (*start_line + 2) % timing->v_total;
/*
 * dcn10_cal_vline_position - resolve the periodic interrupt line range
 * from the stream's requested offset. The offset is interpreted relative
 * to either VUPDATE (add the vupdate offset, then wrap into the frame)
 * or VSYNC (line 0, used directly).
 */
static void dcn10_cal_vline_position(
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;

	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
	else if (vline_pos < 0)

		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

		/* wrap the resolved line into [0, v_total) */
		*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
		*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
		*end_line = (*start_line + 2) % timing->v_total;
	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
		// vsync is line 0 so start_line is just the requested line offset
		*start_line = vline_pos;
		*end_line = (*start_line + 2) % timing->v_total;
/*
 * dcn10_setup_periodic_interrupt - program vertical interrupt 0 on the
 * pipe's timing generator at the line range resolved by
 * dcn10_cal_vline_position().
 */
void dcn10_setup_periodic_interrupt(
		struct pipe_ctx *pipe_ctx)
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	uint32_t start_line = 0;
	uint32_t end_line = 0;

	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);

	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
/*
 * dcn10_setup_vupdate_interrupt - program vertical interrupt 2 at the
 * VUPDATE line; negative offsets are handled before programming.
 */
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	/* offset may be negative when vstartup exceeds blank end */
	if (start_line < 0) {

	if (tg->funcs->setup_vertical_interrupt2)
		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3788 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3789 struct dc_link_settings *link_settings)
3791 struct encoder_unblank_param params = {0};
3792 struct dc_stream_state *stream = pipe_ctx->stream;
3793 struct dc_link *link = stream->link;
3794 struct dce_hwseq *hws = link->dc->hwseq;
3796 /* only 3 items below are used by unblank */
3797 params.timing = pipe_ctx->stream->timing;
3799 params.link_settings.link_rate = link_settings->link_rate;
3801 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3802 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3803 params.timing.pix_clk_100hz /= 2;
3804 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
3807 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3808 hws->funcs.edp_backlight_control(link, true);
/*
 * dcn10_send_immediate_sdp_message - forward a custom SDP (secondary
 * data packet) to the stream encoder for immediate transmission; only
 * meaningful on DP signals, otherwise a no-op.
 */
void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
		const uint8_t *custom_sdp_message,
		unsigned int sdp_message_size)
	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
				pipe_ctx->stream_res.stream_enc,
/*
 * dcn10_set_clock - validate a requested DISPCLK/DPPCLK frequency against
 * the clock manager's reported limits, record it in the current context's
 * clock state, and push it to hardware via update_clocks().
 * Returns a DC_FAIL_* status when the request is out of range or the
 * clock type is unsupported.
 */
enum dc_status dcn10_set_clock(struct dc *dc,
	enum dc_clock_type clock_type,
	struct dc_state *context = dc->current_state;
	struct dc_clock_config clock_cfg = {0};
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;

	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
		context, clock_type, &clock_cfg);

	/* range-check against the clock manager's reported envelope */
	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	/* field name is "bw_requirequired_clock_khz" (sic) as declared */
	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/*update internal request clock for update clock use*/
	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
		current_clocks->dispclk_khz = clk_khz;
	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
		current_clocks->dppclk_khz = clk_khz;
		return DC_ERROR_UNEXPECTED;

	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3862 void dcn10_get_clock(struct dc *dc,
3863 enum dc_clock_type clock_type,
3864 struct dc_clock_config *clock_cfg)
3866 struct dc_state *context = dc->current_state;
3868 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3869 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3873 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3875 struct resource_pool *pool = dc->res_pool;
3878 for (i = 0; i < pool->pipe_count; i++) {
3879 struct hubp *hubp = pool->hubps[i];
3880 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3882 hubp->funcs->hubp_read_state(hubp);
3885 dcc_en_bits[i] = s->dcc_en ? 1 : 0;