2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
54 #include "dce/dmub_psr.h"
55 #include "dc_dmub_srv.h"
56 #include "dce/dmub_hw_lock_mgr.h"
58 #include "dce/dmub_outbox.h"
59 #include "inc/dc_link_dp.h"
60 #include "inc/link_dpcd.h"
62 #define DC_LOGGER_INIT(logger)
70 #define FN(reg_name, field_name) \
71 hws->shifts->field_name, hws->masks->field_name
73 /*print is 17 wide, first two characters are spaces*/
74 #define DTN_INFO_MICRO_SEC(ref_cycle) \
75 print_microsec(dc_ctx, log_ctx, ref_cycle)
77 #define GAMMA_HW_POINTS_NUM 256
79 #define PGFSM_POWER_ON 0
80 #define PGFSM_POWER_OFF 2
/*
 * Print a DCHUB reference-clock cycle count into the DTN log as a
 * microsecond value formatted " %11d.%03d" (0.001us resolution via
 * the frac = 1000 scale factor against the ref clock in MHz).
 * NOTE(review): the ref_cycle parameter line and the DTN_INFO argument
 * list appear truncated in this source view -- verify against upstream.
 */
static void print_microsec(struct dc_context *dc_ctx,
    struct dc_log_buffer_ctx *log_ctx,
    /* ref clock converted kHz -> MHz for the cycle->us division below */
    const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
    static const unsigned int frac = 1000;
    uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
    DTN_INFO("  %11d.%03d",
/*
 * dcn10_lock_all_pipes - Assert or release the pipe-control lock on every
 * enabled top pipe in @context.
 *
 * Only top pipes with an attached plane and an enabled timing generator are
 * (un)locked; bottom pipes share the top pipe's TG so locking them again
 * would be redundant.
 * NOTE(review): the `lock` parameter, loop-skip `continue;` and closing
 * braces appear truncated in this source view -- verify against upstream;
 * presumably only one of the two pipe_control_lock calls executes, selected
 * by the lock flag.
 */
void dcn10_lock_all_pipes(struct dc *dc,
    struct dc_state *context,
    struct pipe_ctx *pipe_ctx;
    struct timing_generator *tg;
    for (i = 0; i < dc->res_pool->pipe_count; i++) {
        pipe_ctx = &context->res_ctx.pipe_ctx[i];
        tg = pipe_ctx->stream_res.tg;
         * Only lock the top pipe's tg to prevent redundant
         * (un)locking. Also skip if pipe is disabled.
        if (pipe_ctx->top_pipe ||
            !pipe_ctx->plane_state ||
            !tg->funcs->is_tg_enabled(tg))
        dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
        dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
/*
 * log_mpc_crc - Dump MPC and DPP CRC result registers into the DTN log.
 *
 * Each register is read only if it exists on this ASIC (REG() non-zero),
 * so the same code works across DCN register maps.
 */
static void log_mpc_crc(struct dc *dc,
    struct dc_log_buffer_ctx *log_ctx)
    struct dc_context *dc_ctx = dc->ctx;
    struct dce_hwseq *hws = dc->hwseq;
    if (REG(MPC_CRC_RESULT_GB))
        DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
        REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
    if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
        DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
        REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
/*
 * dcn10_log_hubbub_state - Dump the four HUBBUB watermark sets into the
 * DTN log, one row per set, all values converted from ref-clock cycles
 * to microseconds via DTN_INFO_MICRO_SEC().
 */
static void dcn10_log_hubbub_state(struct dc *dc,
    struct dc_log_buffer_ctx *log_ctx)
    struct dc_context *dc_ctx = dc->ctx;
    struct dcn_hubbub_wm wm;
    memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
    dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
    DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
            "         sr_enter          sr_exit  dram_clk_change\n");
    /* 4 hardware watermark sets (A-D) */
    for (i = 0; i < 4; i++) {
        struct dcn_hubbub_wm_set *s;
        /* NOTE(review): assignment of s from wm.sets[i] appears truncated
         * in this source view -- verify against upstream. */
        DTN_INFO("WM_Set[%d]:", s->wm_set);
        DTN_INFO_MICRO_SEC(s->data_urgent);
        DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
        DTN_INFO_MICRO_SEC(s->sr_enter);
        DTN_INFO_MICRO_SEC(s->sr_exit);
        /* "chanage" is the actual struct field spelling -- cannot fix here */
        DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
/*
 * dcn10_log_hubp_states - Dump per-HUBP hardware state into the DTN log.
 *
 * Emits four tables, one row per pipe in the resource pool:
 *   - HUBP: surface format/size/rotation, DCC, blank/clock enables, TTU
 *   - RQ:   request generator registers (luma and chroma halves)
 *   - DLG:  delay/latency generator registers
 *   - TTU:  time-to-underflow QoS registers
 * State is refreshed from hardware via hubp_read_state() before printing.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
    struct dc_context *dc_ctx = dc->ctx;
    struct resource_pool *pool = dc->res_pool;
        "HUBP:  format  addr_hi  width  height  rot  mir  sw_mode dcc_en blank_en clock_en ttu_dis underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
    for (i = 0; i < pool->pipe_count; i++) {
        struct hubp *hubp = pool->hubps[i];
        struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
        /* latch current register values into the cached state struct */
        hubp->funcs->hubp_read_state(hubp);
        /* NOTE(review): most of the argument list for this DTN_INFO appears
         * truncated in this source view -- verify against upstream. */
        DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
                s->underflow_status);
        DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
        DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
        DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
    DTN_INFO("\n=========RQ========\n");
    DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
        "  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
        "  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
    for (i = 0; i < pool->pipe_count; i++) {
        struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
        struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
        DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
            pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
            rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
            rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
            rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
            rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
            rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
            rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
            rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
            rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
    DTN_INFO("========DLG========\n");
    DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
            "  dst_y_a_s  dst_y_pf   dst_y_vvb        dst_y_rvb        dst_y_vfl        dst_y_rfl  rf_pix_fq"
            "  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
            "  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
            "  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l "
            "  rc_ld_c    cha_cur0  ofst_cur1  cha_cur1  vr_af_vc0  ddrq_limt  x_rt_dlay"
            "  x_rp_dlay  x_rr_sfl\n");
    for (i = 0; i < pool->pipe_count; i++) {
        struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
        struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
        DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
            "  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
            "  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
            pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
            dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
            dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
            dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
            dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
            dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
            dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
            dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
            dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
            dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
            dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
            dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
            dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
            dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
            dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
            dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
            dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
            dlg_regs->xfc_reg_remote_surface_flip_latency);
    DTN_INFO("========TTU========\n");
    DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
            "  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
            "  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
    for (i = 0; i < pool->pipe_count; i++) {
        struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
        struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
        DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
            pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
            ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
            ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
            ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
            ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
            ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
            ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
/*
 * dcn10_log_hw_state - Dump a snapshot of the full display hardware state
 * into the DTN log: HUBBUB watermarks, HUBP state, DPP gamma/gamut config,
 * MPCC blending tree, OTG timing, DSC, stream/link encoders, calculated
 * clocks, MPC CRCs, and (when present) DP HPO stream/link encoders.
 * Used by debugfs and test environments; reads hardware via the per-IP
 * read_state hooks.
 */
void dcn10_log_hw_state(struct dc *dc,
    struct dc_log_buffer_ctx *log_ctx)
    struct dc_context *dc_ctx = dc->ctx;
    struct resource_pool *pool = dc->res_pool;
    dcn10_log_hubbub_state(dc, log_ctx);
    dcn10_log_hubp_states(dc, log_ctx);
    /* DPP: input/degamma/regamma LUT modes and gamut remap coefficients */
    DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
            "  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
            "C31 C32   C33 C34\n");
    for (i = 0; i < pool->pipe_count; i++) {
        struct dpp *dpp = pool->dpps[i];
        struct dcn_dpp_state s = {0};
        dpp->funcs->dpp_read_state(dpp, &s);
        /* NOTE(review): several arguments of this DTN_INFO (and the
         * closing parts of the ternary chains) appear truncated in this
         * source view -- verify against upstream. */
        DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
                "%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
                (s.igam_lut_mode == 0) ? "BypassFixed" :
                    ((s.igam_lut_mode == 1) ? "BypassFloat" :
                    ((s.igam_lut_mode == 2) ? "RAM" :
                    ((s.igam_lut_mode == 3) ? "RAM" :
                (s.dgam_lut_mode == 0) ? "Bypass" :
                    ((s.dgam_lut_mode == 1) ? "sRGB" :
                    ((s.dgam_lut_mode == 2) ? "Ycc" :
                    ((s.dgam_lut_mode == 3) ? "RAM" :
                    ((s.dgam_lut_mode == 4) ? "RAM" :
                (s.rgam_lut_mode == 0) ? "Bypass" :
                    ((s.rgam_lut_mode == 1) ? "sRGB" :
                    ((s.rgam_lut_mode == 2) ? "Ycc" :
                    ((s.rgam_lut_mode == 3) ? "RAM" :
                    ((s.rgam_lut_mode == 4) ? "RAM" :
                s.gamut_remap_c11_c12,
                s.gamut_remap_c13_c14,
                s.gamut_remap_c21_c22,
                s.gamut_remap_c23_c24,
                s.gamut_remap_c31_c32,
                s.gamut_remap_c33_c34);
    /* MPCC blending-tree state, one row per pipe */
    DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
    for (i = 0; i < pool->pipe_count; i++) {
        struct mpcc_state s = {0};
        pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
        DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
            i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
            s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
    /* OTG timing state, enabled masters only */
    DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
    for (i = 0; i < pool->timing_generator_count; i++) {
        struct timing_generator *tg = pool->timing_generators[i];
        struct dcn_otg_state s = {0};
        /* Read shared OTG state registers for all DCNx */
        optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
         * For DCN2 and greater, a register on the OPP is used to
         * determine if the CRTC is blanked instead of the OTG. So use
         * dpg_is_blanked() if exists, otherwise fallback on otg.
         * TODO: Implement DCN-specific read_otg_state hooks.
        if (pool->opps[i]->funcs->dpg_is_blanked)
            s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
            s.blank_enabled = tg->funcs->is_blanked(tg);
        //only print if OTG master is enabled
        if ((s.otg_enabled & 1) == 0)
        DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
                s.underflow_occurred_status,
        // Clear underflow for debug purposes
        // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
        // This function is called only from Windows or Diags test environment, hence it's safe to clear
        // it from here without affecting the original intent.
        tg->funcs->clear_optc_underflow(tg);
    // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
    // TODO: Update golden log header to reflect this name change
    DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
    for (i = 0; i < pool->res_cap->num_dsc; i++) {
        struct display_stream_compressor *dsc = pool->dscs[i];
        struct dcn_dsc_state s = {0};
        dsc->funcs->dsc_read_state(dsc, &s);
        DTN_INFO("[%d]: %-9d %-12d %-10d\n",
        s.dsc_bits_per_pixel);
    /* Stream encoder state (DSC mode, GSP7/VBID6 secondary packets) */
    DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
            "  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
    for (i = 0; i < pool->stream_enc_count; i++) {
        struct stream_encoder *enc = pool->stream_enc[i];
        struct enc_state s = {0};
        if (enc->funcs->enc_read_state) {
            enc->funcs->enc_read_state(enc, &s);
            DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
                s.sec_gsp_pps_line_num,
                s.vbid6_line_reference,
                s.sec_gsp_pps_enable,
                s.sec_stream_enable);
    /* Link encoder FEC / link-training status */
    DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
    for (i = 0; i < dc->link_count; i++) {
        struct link_encoder *lenc = dc->links[i]->link_enc;
        struct link_enc_state s = {0};
        if (lenc && lenc->funcs->read_state) {
            lenc->funcs->read_state(lenc, &s);
            DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
                s.dphy_fec_ready_shadow,
                s.dphy_fec_active_status,
                s.dp_link_training_complete);
    DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
        "dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
            dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
            dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
            dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
            dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
            dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
            dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
            dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
    log_mpc_crc(dc, log_ctx);
    /* DP HPO (128b/132b) stream encoders, only on ASICs that have them */
    if (pool->hpo_dp_stream_enc_count > 0) {
        DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
        for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
            struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
            struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
            if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
                hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
                DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d     %d     %d\n",
                    hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
                    hpo_dp_se_state.stream_enc_enabled,
                    hpo_dp_se_state.otg_inst,
                    (hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
                        ((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
                        (hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
                    (hpo_dp_se_state.component_depth == 0) ? 6 :
                        ((hpo_dp_se_state.component_depth == 1) ? 8 :
                        (hpo_dp_se_state.component_depth == 2) ? 10 : 12),
                    hpo_dp_se_state.vid_stream_enabled,
                    hpo_dp_se_state.sdp_enabled,
                    hpo_dp_se_state.compressed_format,
                    hpo_dp_se_state.mapped_to_link_enc);
    /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
    if (pool->hpo_dp_link_enc_count) {
        DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");
        for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
            struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
            struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
            if (hpo_dp_link_enc->funcs->read_state) {
                hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
                DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
                    hpo_dp_link_enc->inst,
                    hpo_dp_le_state.link_enc_enabled,
                    (hpo_dp_le_state.link_mode == 0) ? "TPS1" :
                        (hpo_dp_le_state.link_mode == 1) ? "TPS2" :
                        (hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
                    hpo_dp_le_state.lane_count,
                    hpo_dp_le_state.stream_src[0],
                    hpo_dp_le_state.slot_count[0],
                    hpo_dp_le_state.vc_rate_x[0],
                    hpo_dp_le_state.vc_rate_y[0]);
535 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
537 struct hubp *hubp = pipe_ctx->plane_res.hubp;
538 struct timing_generator *tg = pipe_ctx->stream_res.tg;
540 if (tg->funcs->is_optc_underflow_occurred(tg)) {
541 tg->funcs->clear_optc_underflow(tg);
545 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
546 hubp->funcs->hubp_clear_underflow(hubp);
/*
 * dcn10_enable_power_gating_plane - Force front-end power domains on or
 * allow them to be gated.
 *
 * DOMAIN0/2/4/6 are the DCHUBP domains, DOMAIN1/3/5/7 the DPP domains;
 * force_on == true keeps every domain powered (power gating disabled).
 * NOTE(review): the `enable` parameter and the code that clears force_on
 * when gating is enabled appear truncated in this source view -- verify
 * against upstream.
 */
void dcn10_enable_power_gating_plane(
    struct dce_hwseq *hws,
    bool force_on = true; /* disable power gating */
    /* DCHUBP0/1/2/3 */
    REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
    REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
    REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
    REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
    /* DPP0/1/2/3 */
    REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
    REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
    REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
    REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
574 void dcn10_disable_vga(
575 struct dce_hwseq *hws)
577 unsigned int in_vga1_mode = 0;
578 unsigned int in_vga2_mode = 0;
579 unsigned int in_vga3_mode = 0;
580 unsigned int in_vga4_mode = 0;
582 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
583 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
584 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
585 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
587 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
588 in_vga3_mode == 0 && in_vga4_mode == 0)
591 REG_WRITE(D1VGA_CONTROL, 0);
592 REG_WRITE(D2VGA_CONTROL, 0);
593 REG_WRITE(D3VGA_CONTROL, 0);
594 REG_WRITE(D4VGA_CONTROL, 0);
596 /* HW Engineer's Notes:
597 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
598 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
600 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
601 * VGA_TEST_ENABLE, to leave it in the same state as before.
603 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
604 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
/*
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true if we want to enable power gate, false otherwise.
 *
 * Enable or disable power gate in the specific DPP instance.
 * DPP instance 0..3 map to power domains DOMAIN1/3/5/7; after writing
 * POWER_GATE the code polls PGFSM_PWR_STATUS until the state machine
 * reports the requested state.
 * NOTE(review): the `switch (dpp_inst)`, case labels, breaks, default
 * and early `return;`s appear truncated in this source view -- verify
 * against upstream.
 */
void dcn10_dpp_pg_control(
    struct dce_hwseq *hws,
    unsigned int dpp_inst,
    /* 0 = ungate (power on), 1 = gate */
    uint32_t power_gate = power_on ? 0 : 1;
    uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
    if (hws->ctx->dc->debug.disable_dpp_power_gate)
    /* registers not present on this ASIC -> nothing to program */
    if (REG(DOMAIN1_PG_CONFIG) == 0)
    REG_UPDATE(DOMAIN1_PG_CONFIG,
            DOMAIN1_POWER_GATE, power_gate);
    REG_WAIT(DOMAIN1_PG_STATUS,
            DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
    REG_UPDATE(DOMAIN3_PG_CONFIG,
            DOMAIN3_POWER_GATE, power_gate);
    REG_WAIT(DOMAIN3_PG_STATUS,
            DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
    REG_UPDATE(DOMAIN5_PG_CONFIG,
            DOMAIN5_POWER_GATE, power_gate);
    REG_WAIT(DOMAIN5_PG_STATUS,
            DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
    REG_UPDATE(DOMAIN7_PG_CONFIG,
            DOMAIN7_POWER_GATE, power_gate);
    REG_WAIT(DOMAIN7_PG_STATUS,
            DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
/*
 * dcn10_hubp_pg_control - HUBP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @hubp_inst: DPP instance reference.
 * @power_on: true if we want to enable power gate, false otherwise.
 *
 * Enable or disable power gate in the specific HUBP instance.
 * HUBP instance 0..3 map to power domains DOMAIN0/2/4/6; after writing
 * POWER_GATE the code polls PGFSM_PWR_STATUS for the requested state.
 * NOTE(review): the `switch (hubp_inst)` opener, `break;`s, default case
 * and early `return;`s appear truncated in this source view -- verify
 * against upstream.
 */
void dcn10_hubp_pg_control(
    struct dce_hwseq *hws,
    unsigned int hubp_inst,
    /* 0 = ungate (power on), 1 = gate */
    uint32_t power_gate = power_on ? 0 : 1;
    uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
    if (hws->ctx->dc->debug.disable_hubp_power_gate)
    /* registers not present on this ASIC -> nothing to program */
    if (REG(DOMAIN0_PG_CONFIG) == 0)
    case 0: /* DCHUBP0 */
        REG_UPDATE(DOMAIN0_PG_CONFIG,
                DOMAIN0_POWER_GATE, power_gate);
        REG_WAIT(DOMAIN0_PG_STATUS,
                DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
    case 1: /* DCHUBP1 */
        REG_UPDATE(DOMAIN2_PG_CONFIG,
                DOMAIN2_POWER_GATE, power_gate);
        REG_WAIT(DOMAIN2_PG_STATUS,
                DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
    case 2: /* DCHUBP2 */
        REG_UPDATE(DOMAIN4_PG_CONFIG,
                DOMAIN4_POWER_GATE, power_gate);
        REG_WAIT(DOMAIN4_PG_STATUS,
                DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
    case 3: /* DCHUBP3 */
        REG_UPDATE(DOMAIN6_PG_CONFIG,
                DOMAIN6_POWER_GATE, power_gate);
        REG_WAIT(DOMAIN6_PG_STATUS,
                DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
/*
 * power_on_plane - Un-gate the front end (DPP + HUBP domains) for one
 * plane/pipe before it is programmed.
 *
 * Wrapped in DC_IP_REQUEST_CNTL set/clear so the PGFSM accepts the
 * power-up request; only runs when the register exists on this ASIC.
 * NOTE(review): the plane_id parameter line and the REG_SET field
 * arguments appear truncated in this source view -- verify upstream.
 */
static void power_on_plane(
    struct dce_hwseq *hws,
    DC_LOGGER_INIT(hws->ctx->logger);
    if (REG(DC_IP_REQUEST_CNTL)) {
        REG_SET(DC_IP_REQUEST_CNTL, 0,
        /* power up DPP then HUBP for this plane, when hooks exist */
        if (hws->funcs.dpp_pg_control)
            hws->funcs.dpp_pg_control(hws, plane_id, true);
        if (hws->funcs.hubp_pg_control)
            hws->funcs.hubp_pg_control(hws, plane_id, true);
        REG_SET(DC_IP_REQUEST_CNTL, 0,
            "Un-gated front end for pipe %d\n", plane_id);
/*
 * undo_DEGVIDCN10_253_wa - Revert the DEGVIDCN10_253 stutter workaround.
 *
 * If the workaround was applied (hubp0 kept powered/unblanked to enable
 * stutter while all pipes are gated), blank hubp0 and power-gate it
 * again, then clear the applied flag. No-op if the WA is not active.
 * NOTE(review): the early `return;` and REG_SET field arguments appear
 * truncated in this source view -- verify against upstream.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
    struct dce_hwseq *hws = dc->hwseq;
    struct hubp *hubp = dc->res_pool->hubps[0];
    if (!hws->wa_state.DEGVIDCN10_253_applied)
    hubp->funcs->set_blank(hubp, true);
    REG_SET(DC_IP_REQUEST_CNTL, 0,
    hws->funcs.hubp_pg_control(hws, 0, false);
    REG_SET(DC_IP_REQUEST_CNTL, 0,
    hws->wa_state.DEGVIDCN10_253_applied = false;
/*
 * apply_DEGVIDCN10_253_wa - Apply the DEGVIDCN10_253 stutter workaround.
 *
 * When every HUBP is power gated, stutter cannot engage; the WA powers
 * hubp0 back up (unblanked) so stutter works. Skipped when stutter is
 * debug-disabled, when the WA is not flagged for this ASIC, or when any
 * pipe is still ungated.
 * NOTE(review): early `return;`s and REG_SET field arguments appear
 * truncated in this source view -- verify against upstream.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
    struct dce_hwseq *hws = dc->hwseq;
    struct hubp *hubp = dc->res_pool->hubps[0];
    if (dc->debug.disable_stutter)
    if (!hws->wa.DEGVIDCN10_253)
    for (i = 0; i < dc->res_pool->pipe_count; i++) {
        if (!dc->res_pool->hubps[i]->power_gated)
    /* all pipe power gated, apply work around to enable stutter. */
    REG_SET(DC_IP_REQUEST_CNTL, 0,
    hws->funcs.hubp_pg_control(hws, 0, true);
    REG_SET(DC_IP_REQUEST_CNTL, 0,
    hubp->funcs->set_hubp_blank_en(hubp, false);
    hws->wa_state.DEGVIDCN10_253_applied = true;
/*
 * dcn10_bios_golden_init - Run the VBIOS golden init sequence for DCN.
 *
 * Calls the BIOS command table to initialize the global DCN state and
 * disable each pipe, then restores the self-refresh-force-enable setting
 * if the command table flipped it (S0i3 DF-sleep workaround, see the
 * inline comment).
 * NOTE(review): the early `return;` after the s0i3 WA check appears
 * truncated in this source view -- verify against upstream.
 */
void dcn10_bios_golden_init(struct dc *dc)
    struct dce_hwseq *hws = dc->hwseq;
    struct dc_bios *bp = dc->ctx->dc_bios;
    bool allow_self_fresh_force_enable = true;
    if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
    /* snapshot the pre-command-table self-refresh setting */
    if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
        allow_self_fresh_force_enable =
            dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
    /* WA for making DF sleep when idle after resume from S0i3.
     * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
     * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
     * before calling command table and it changed to 1 after,
     * it should be set back to 0.
     */
    /* initialize dcn global */
    bp->funcs->enable_disp_power_gating(bp,
            CONTROLLER_ID_D0, ASIC_PIPE_INIT);
    for (i = 0; i < dc->res_pool->pipe_count; i++) {
        /* initialize dcn per pipe */
        bp->funcs->enable_disp_power_gating(bp,
                CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
    if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
        if (allow_self_fresh_force_enable == false &&
                dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
            dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
                    !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
/*
 * false_optc_underflow_wa - Work around spurious OPTC underflow.
 *
 * Records the underflow status, waits for MPCC disconnect on every pipe
 * of @stream, enables blank-data double buffering, and clears the
 * underflow only if it appeared during the WA itself (i.e. was false).
 * NOTE(review): the `struct dc *dc` parameter line and locals/early
 * `return;` appear truncated in this source view -- verify upstream.
 */
static void false_optc_underflow_wa(
    const struct dc_stream_state *stream,
    struct timing_generator *tg)
    if (!dc->hwseq->wa.false_optc_underflow)
    underflow = tg->funcs->is_optc_underflow_occurred(tg);
    for (i = 0; i < dc->res_pool->pipe_count; i++) {
        struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
        if (old_pipe_ctx->stream != stream)
        dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
    if (tg->funcs->set_blank_data_double_buffer)
        tg->funcs->set_blank_data_double_buffer(tg, true);
    /* only clear if the underflow was NOT already present before the WA */
    if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
        tg->funcs->clear_optc_underflow(tg);
/*
 * dcn10_enable_stream_timing - Program and start the OTG for a stream.
 *
 * Only runs for the top (parent) pipe -- child pipes share its back end.
 * Sequence: enable OPTC clock, program the pixel clock PLL, program OTG
 * timing, set the blank color (with YCbCr420 Cr fixup), blank the OTG and
 * run the false-underflow WA, then enable the CRTC.
 *
 * Return: DC_OK on success, DC_ERROR_UNEXPECTED if the pixel clock or
 * CRTC enable fails (per the visible return statements).
 * NOTE(review): several lines (the `struct dc *dc` parameter, DC_OK
 * returns, some program_timing arguments) appear truncated in this
 * source view -- verify against upstream.
 */
enum dc_status dcn10_enable_stream_timing(
    struct pipe_ctx *pipe_ctx,
    struct dc_state *context,
    struct dc_stream_state *stream = pipe_ctx->stream;
    enum dc_color_space color_space;
    struct tg_color black_color = {0};
    /* by upper caller loop, pipe0 is parent pipe and be called first.
     * back end is set up by for pipe0. Other children pipe share back end
     * with pipe 0. No program is needed.
     */
    if (pipe_ctx->top_pipe != NULL)
    /* TODO check if timing_changed, disable stream if timing changed */
    /* HW program guide assume display already disable
     * by unplug sequence. OTG assume stop.
     */
    pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
    if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
            pipe_ctx->clock_source,
            &pipe_ctx->stream_res.pix_clk_params,
            dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
            &pipe_ctx->pll_settings)) {
        return DC_ERROR_UNEXPECTED;
    pipe_ctx->stream_res.tg->funcs->program_timing(
            pipe_ctx->stream_res.tg,
            pipe_ctx->pipe_dlg_param.vready_offset,
            pipe_ctx->pipe_dlg_param.vstartup_start,
            pipe_ctx->pipe_dlg_param.vupdate_offset,
            pipe_ctx->pipe_dlg_param.vupdate_width,
            pipe_ctx->stream->signal,
#if 0 /* move to after enable_crtc */
    /* TODO: OPP FMT, ABM. etc. should be done here. */
    /* or FPGA now. instance 0 only. TODO: move to opp.c */
    inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
    pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
                pipe_ctx->stream_res.opp,
                &stream->bit_depth_params,
    /* program otg blank color */
    color_space = stream->output_color_space;
    color_space_to_black_color(dc, color_space, &black_color);
    /*
     * The way 420 is packed, 2 channels carry Y component, 1 channel
     * alternate between Cb and Cr, so both channels need the pixel
     */
    if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
        black_color.color_r_cr = black_color.color_g_y;
    if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
        pipe_ctx->stream_res.tg->funcs->set_blank_color(
                pipe_ctx->stream_res.tg,
    if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
            !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
        pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
        hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
        false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
    /* VTG is within DCHUB command block. DCFCLK is always on */
    if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
        return DC_ERROR_UNEXPECTED;
    /* TODO program crtc source select for non-virtual signal*/
    /* TODO program FMT */
    /* TODO setup link_enc */
    /* TODO set stream attributes */
    /* TODO program audio */
    /* TODO enable stream if timing changed */
    /* TODO unblank stream if DP */
/*
 * dcn10_reset_back_end_for_pipe - Tear down the back end for one pipe.
 *
 * Disables the stream/link (respecting the fastboot dpms_off caveat),
 * shuts down and releases the audio endpoint, and -- for the top pipe
 * only, since children share its back end -- disables ABM, the CRTC,
 * the OPTC clock and DRR. Finally detaches the stream from the pipe
 * after confirming the pipe belongs to the current state.
 * NOTE(review): the `struct dc *dc` parameter line, several braces and
 * a `return;` appear truncated in this source view -- verify upstream.
 */
static void dcn10_reset_back_end_for_pipe(
    struct pipe_ctx *pipe_ctx,
    struct dc_state *context)
    struct dc_link *link;
    DC_LOGGER_INIT(dc->ctx->logger);
    if (pipe_ctx->stream_res.stream_enc == NULL) {
        pipe_ctx->stream = NULL;
    if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
        link = pipe_ctx->stream->link;
        /* DPMS may already disable or */
        /* dpms_off status is incorrect due to fastboot
         * feature. When system resume from S4 with second
         * screen only, the dpms_off would be true but
         * VBIOS lit up eDP, so check link status too.
         */
        if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
            core_link_disable_stream(pipe_ctx);
        else if (pipe_ctx->stream_res.audio)
            dc->hwss.disable_audio_stream(pipe_ctx);
        if (pipe_ctx->stream_res.audio) {
            /*disable az_endpoint*/
            pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
            if (dc->caps.dynamic_audio == true) {
                /*we have to dynamic arbitrate the audio endpoints*/
                /*we free the resource, need reset is_audio_acquired*/
                update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
                        pipe_ctx->stream_res.audio, false);
                pipe_ctx->stream_res.audio = NULL;
    /* by upper caller loop, parent pipe: pipe0, will be reset last.
     * back end share by all pipes and will be disable only when disable
     */
    if (pipe_ctx->top_pipe == NULL) {
        if (pipe_ctx->stream_res.abm)
            dc->hwss.set_abm_immediate_disable(pipe_ctx);
        pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
        pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
        if (pipe_ctx->stream_res.tg->funcs->set_drr)
            pipe_ctx->stream_res.tg->funcs->set_drr(
                    pipe_ctx->stream_res.tg, NULL);
    /* sanity check: only log/detach if this pipe is in the current state */
    for (i = 0; i < dc->res_pool->pipe_count; i++)
        if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
    if (i == dc->res_pool->pipe_count)
    pipe_ctx->stream = NULL;
    DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
                    pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
/* HW workaround: force-recover the display pipes after a HUBP underflow.
 * Walks every pipe in the current state and applies the register sequence
 * spelled out in the comment below (blank HUBPs, pulse the DCHUBBUB global
 * soft reset, toggle HUBP_DISABLE, then unblank). Gated by the
 * dc->debug.recovery_enabled debug option.
 */
1034 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1038 bool need_recover = true;
1040 if (!dc->debug.recovery_enabled)
1043 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1044 struct pipe_ctx *pipe_ctx =
1045 &dc->current_state->res_ctx.pipe_ctx[i];
1046 if (pipe_ctx != NULL) {
1047 hubp = pipe_ctx->plane_res.hubp;
1048 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1049 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1050 /* one pipe underflow, we will reset all the pipes*/
1051 need_recover = true;
1059 DCHUBP_CNTL:HUBP_BLANK_EN=1
1060 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1061 DCHUBP_CNTL:HUBP_DISABLE=1
1062 DCHUBP_CNTL:HUBP_DISABLE=0
1063 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1064 DCSURF_PRIMARY_SURFACE_ADDRESS
1065 DCHUBP_CNTL:HUBP_BLANK_EN=0
1068 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1069 struct pipe_ctx *pipe_ctx =
1070 &dc->current_state->res_ctx.pipe_ctx[i];
1071 if (pipe_ctx != NULL) {
1072 hubp = pipe_ctx->plane_res.hubp;
1073 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1074 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1075 hubp->funcs->set_hubp_blank_en(hubp, true);
1078 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1079 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1081 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1082 struct pipe_ctx *pipe_ctx =
1083 &dc->current_state->res_ctx.pipe_ctx[i];
1084 if (pipe_ctx != NULL) {
1085 hubp = pipe_ctx->plane_res.hubp;
1086 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1087 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1088 hubp->funcs->hubp_disable_control(hubp, true);
1091 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1092 struct pipe_ctx *pipe_ctx =
1093 &dc->current_state->res_ctx.pipe_ctx[i];
1094 if (pipe_ctx != NULL) {
1095 hubp = pipe_ctx->plane_res.hubp;
1096 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1097 if (hubp != NULL && hubp->funcs->hubp_disable_control)
/* NOTE(review): step comment above says HUBP_DISABLE=0, yet this passes
 * true (same as the DISABLE=1 step) — looks like it should be false;
 * confirm against the intended register sequence. */
1098 hubp->funcs->hubp_disable_control(hubp, true);
1101 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1102 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1103 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1104 struct pipe_ctx *pipe_ctx =
1105 &dc->current_state->res_ctx.pipe_ctx[i];
1106 if (pipe_ctx != NULL) {
1107 hubp = pipe_ctx->plane_res.hubp;
1108 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1109 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
/* NOTE(review): step comment above says HUBP_BLANK_EN=0, yet this passes
 * true (same as the BLANK_EN=1 step) — presumably should be false to
 * unblank; verify. */
1110 hubp->funcs->set_hubp_blank_en(hubp, true);
/* Sanity check that the HUBBUB still allows p-state change. On failure,
 * optionally logs HW state, traps to the debugger, and attempts the
 * force-recovery workaround, then re-verifies.
 */
1117 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1119 struct hubbub *hubbub = dc->res_pool->hubbub;
1120 static bool should_log_hw_state; /* prevent hw state log by default */
1122 if (!hubbub->funcs->verify_allow_pstate_change_high)
1125 if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
1128 if (should_log_hw_state)
1129 dcn10_log_hw_state(dc, NULL);
1131 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1132 BREAK_TO_DEBUGGER();
1133 if (dcn10_hw_wa_force_recovery(dc)) {
/* Re-check after the forced recovery; a second failure is fatal enough
 * to trap again. */
1135 if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
1136 BREAK_TO_DEBUGGER();
1141 /* trigger HW to start disconnect plane from stream on the next vsync */
1142 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1144 struct dce_hwseq *hws = dc->hwseq;
1145 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1146 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1147 struct mpc *mpc = dc->res_pool->mpc;
1148 struct mpc_tree *mpc_tree_params;
1149 struct mpcc *mpcc_to_remove = NULL;
1150 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1152 mpc_tree_params = &(opp->mpc_tree_params);
/* Find the MPCC driven by this pipe's DPP within the OPP's blend tree. */
1153 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1156 if (mpcc_to_remove == NULL)
1159 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1160 // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1161 // so don't wait for MPCC_IDLE in the programming sequence
1162 if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
1163 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
/* Force an optimize pass so the disconnect completes before next flip. */
1165 dc->optimized_required = true;
1167 if (hubp->funcs->hubp_disconnect)
1168 hubp->funcs->hubp_disconnect(hubp);
1170 if (dc->debug.sanity_checks)
1171 hws->funcs.verify_allow_pstate_change_high(dc);
1175 * dcn10_plane_atomic_power_down - Power down plane components.
1177 * @dc: dc struct reference. used for grab hwseq.
1178 * @dpp: dpp struct reference.
1179 * @hubp: hubp struct reference.
1181 * Keep in mind that this operation requires a power gate configuration;
1182 * however, requests for switch power gate are precisely controlled to avoid
1183 * problems. For this reason, power gate request is usually disabled. This
1184 * function first needs to enable the power gate request before disabling DPP
1185 * and HUBP. Finally, it disables the power gate request again.
1187 void dcn10_plane_atomic_power_down(struct dc *dc,
1191 struct dce_hwseq *hws = dc->hwseq;
1192 DC_LOGGER_INIT(dc->ctx->logger);
/* Only touch power gating if the IP request register exists on this ASIC. */
1194 if (REG(DC_IP_REQUEST_CNTL)) {
/* Enable power-gate requests for the duration of this sequence. */
1195 REG_SET(DC_IP_REQUEST_CNTL, 0,
1198 if (hws->funcs.dpp_pg_control)
1199 hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1201 if (hws->funcs.hubp_pg_control)
1202 hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1204 dpp->funcs->dpp_reset(dpp);
/* Disable power-gate requests again per the contract described above. */
1205 REG_SET(DC_IP_REQUEST_CNTL, 0,
1208 "Power gated front end %d\n", hubp->inst);
1212 /* disable HW used by plane.
1213 * note: cannot disable until disconnect is complete
1215 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1217 struct dce_hwseq *hws = dc->hwseq;
1218 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1219 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1220 int opp_id = hubp->opp_id;
/* Ensure the vsync-triggered MPCC disconnect has finished first. */
1222 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1224 hubp->funcs->hubp_clk_cntl(hubp, false);
1226 dpp->funcs->dpp_dppclk_control(dpp, false, false);
/* Gate the OPP pipe clock only when no MPCC remains attached to it. */
1228 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1229 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1230 pipe_ctx->stream_res.opp,
1233 hubp->power_gated = true;
1234 dc->optimized_required = false; /* We're powering off, no need to optimize */
1236 hws->funcs.plane_atomic_power_down(dc,
1237 pipe_ctx->plane_res.dpp,
1238 pipe_ctx->plane_res.hubp);
/* Fully detach this pipe_ctx from stream/plane resources. */
1240 pipe_ctx->stream = NULL;
1241 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1242 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1243 pipe_ctx->top_pipe = NULL;
1244 pipe_ctx->bottom_pipe = NULL;
1245 pipe_ctx->plane_state = NULL;
/* Disable a plane's front end unless it is already power gated; applies the
 * DEGVIDCN10-253 workaround afterwards.
 */
1248 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1250 struct dce_hwseq *hws = dc->hwseq;
1251 DC_LOGGER_INIT(dc->ctx->logger);
/* Nothing to do if there is no HUBP or it is already gated. */
1253 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1256 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1258 apply_DEGVIDCN10_253_wa(dc);
1260 DC_LOG_DC("Power down front end %d\n",
1261 pipe_ctx->pipe_idx);
/* Bring all pipes to a known-disabled state at init time: blank every
 * enabled OTG, reset DET allocations and the MPC muxes, tear down each
 * front end via plane_atomic_disconnect/disable_plane, and finally power
 * gate any DSC not feeding the currently-running OPTC. Pipes serving a
 * seamless-boot stream are deliberately left untouched throughout.
 */
1264 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1267 struct dce_hwseq *hws = dc->hwseq;
1268 struct hubbub *hubbub = dc->res_pool->hubbub;
1269 bool can_apply_seamless_boot = false;
1271 for (i = 0; i < context->stream_count; i++) {
1272 if (context->streams[i]->apply_seamless_boot_optimization) {
1273 can_apply_seamless_boot = true;
1278 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1279 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1280 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1282 /* There is assumption that pipe_ctx is not mapping irregularly
1283 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1284 * we will use the pipe, so don't disable
1286 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1289 /* Blank controller using driver code instead of
1292 if (tg->funcs->is_tg_enabled(tg)) {
1293 if (hws->funcs.init_blank != NULL) {
1294 hws->funcs.init_blank(dc, tg);
1295 tg->funcs->lock(tg);
1297 tg->funcs->lock(tg);
1298 tg->funcs->set_blank(tg, true);
1299 hwss_wait_for_blank_complete(tg);
1304 /* Reset det size */
1305 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1306 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1307 struct hubp *hubp = dc->res_pool->hubps[i];
1309 /* Do not need to reset for seamless boot */
1310 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1313 if (hubbub && hubp) {
1314 if (hubbub->funcs->program_det_size)
1315 hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
1319 /* num_opp will be equal to number of mpcc */
1320 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1321 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1323 /* Cannot reset the MPC mux if seamless boot */
1324 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1327 dc->res_pool->mpc->funcs->mpc_init_single_inst(
1328 dc->res_pool->mpc, i);
1331 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1332 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1333 struct hubp *hubp = dc->res_pool->hubps[i];
1334 struct dpp *dpp = dc->res_pool->dpps[i];
1335 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1337 /* There is assumption that pipe_ctx is not mapping irregularly
1338 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1339 * we will use the pipe, so don't disable
1341 if (can_apply_seamless_boot &&
1342 pipe_ctx->stream != NULL &&
1343 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1344 pipe_ctx->stream_res.tg)) {
1345 // Enable double buffering for OTG_BLANK no matter if
1346 // seamless boot is enabled or not to suppress global sync
1347 // signals when OTG blanked. This is to prevent pipe from
1348 // requesting data while in PSR.
1349 tg->funcs->tg_init(tg);
1350 hubp->power_gated = true;
1354 /* Disable on the current state so the new one isn't cleared. */
1355 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1357 dpp->funcs->dpp_reset(dpp);
/* Populate the pipe_ctx with the default 1:1 resource mapping so the
 * disconnect/disable helpers below operate on the right instances. */
1359 pipe_ctx->stream_res.tg = tg;
1360 pipe_ctx->pipe_idx = i;
1362 pipe_ctx->plane_res.hubp = hubp;
1363 pipe_ctx->plane_res.dpp = dpp;
1364 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1365 hubp->mpcc_id = dpp->inst;
1366 hubp->opp_id = OPP_ID_INVALID;
1367 hubp->power_gated = false;
1369 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1370 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1371 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1372 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1374 hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1376 if (tg->funcs->is_tg_enabled(tg))
1377 tg->funcs->unlock(tg);
1379 dc->hwss.disable_plane(dc, pipe_ctx);
1381 pipe_ctx->stream_res.tg = NULL;
1382 pipe_ctx->plane_res.hubp = NULL;
1384 if (tg->funcs->is_tg_enabled(tg)) {
1385 if (tg->funcs->init_odm)
1386 tg->funcs->init_odm(tg);
1389 tg->funcs->tg_init(tg);
1392 /* Power gate DSCs */
1393 if (hws->funcs.dsc_pg_control != NULL) {
1394 uint32_t num_opps = 0;
1395 uint32_t opp_id_src0 = OPP_ID_INVALID;
1396 uint32_t opp_id_src1 = OPP_ID_INVALID;
1398 // Step 1: To find out which OPTC is running & OPTC DSC is ON
1399 // We can't use res_pool->res_cap->num_timing_generator to check
1400 // Because it records display pipes default setting built in driver,
1401 // not display pipes of the current chip.
1402 // Some ASICs would be fused display pipes less than the default setting.
1403 // In dcnxx_resource_construct function, driver would obatin real information.
1404 for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1405 uint32_t optc_dsc_state = 0;
1406 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1408 if (tg->funcs->is_tg_enabled(tg)) {
1409 if (tg->funcs->get_dsc_status)
1410 tg->funcs->get_dsc_status(tg, &optc_dsc_state);
1411 // Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
1412 // non-zero value is DSC enabled
1413 if (optc_dsc_state != 0) {
1414 tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
1420 // Step 2: To power down DSC but skip DSC of running OPTC
1421 for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1422 struct dcn_dsc_state s = {0};
1424 dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
/* Skip the DSC instance that is actively feeding the running OPTC. */
1426 if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
1427 s.dsc_clock_en && s.dsc_fw_en)
1430 hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
/* One-time DCN10 hardware init: clocks, DCCG, BIOS golden settings,
 * reference clock discovery, per-link encoder init, pipe teardown via
 * init_pipes, audio/panel/ABM/DMCU bring-up, and clock gating.
 * FPGA (Maximus) environments take an abbreviated path. Skips pipe
 * power-down when seamless boot may still need the VBIOS-lit pipes.
 */
1435 void dcn10_init_hw(struct dc *dc)
1438 struct abm *abm = dc->res_pool->abm;
1439 struct dmcu *dmcu = dc->res_pool->dmcu;
1440 struct dce_hwseq *hws = dc->hwseq;
1441 struct dc_bios *dcb = dc->ctx->dc_bios;
1442 struct resource_pool *res_pool = dc->res_pool;
1443 uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1444 bool is_optimized_init_done = false;
1446 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1447 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1449 /* Align bw context with hw config when system resume. */
1450 if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
1451 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
1452 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
1455 // Initialize the dccg
1456 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1457 dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
/* FPGA emulation path: minimal register init, no VBIOS interaction. */
1459 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1461 REG_WRITE(REFCLK_CNTL, 0);
1462 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1463 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1465 if (!dc->debug.disable_clock_gate) {
1466 /* enable all DCN clock gating */
1467 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1469 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1471 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1474 //Enable ability to power gate / don't force power on permanently
1475 if (hws->funcs.enable_power_gating_plane)
1476 hws->funcs.enable_power_gating_plane(hws, true);
1481 if (!dcb->funcs->is_accelerated_mode(dcb))
1482 hws->funcs.disable_vga(dc->hwseq);
1484 hws->funcs.bios_golden_init(dc);
/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal frequency
 * when firmware info is available; otherwise assert. */
1486 if (dc->ctx->dc_bios->fw_info_valid) {
1487 res_pool->ref_clocks.xtalin_clock_inKhz =
1488 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1490 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1491 if (res_pool->dccg && res_pool->hubbub) {
1493 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1494 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1495 &res_pool->ref_clocks.dccg_ref_clock_inKhz);
1497 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1498 res_pool->ref_clocks.dccg_ref_clock_inKhz,
1499 &res_pool->ref_clocks.dchub_ref_clock_inKhz);
1501 // Not all ASICs have DCCG sw component
1502 res_pool->ref_clocks.dccg_ref_clock_inKhz =
1503 res_pool->ref_clocks.xtalin_clock_inKhz;
1504 res_pool->ref_clocks.dchub_ref_clock_inKhz =
1505 res_pool->ref_clocks.xtalin_clock_inKhz;
1509 ASSERT_CRITICAL(false);
1511 for (i = 0; i < dc->link_count; i++) {
1512 /* Power up AND update implementation according to the
1513 * required signal (which may be different from the
1514 * default signal on connector).
1516 struct dc_link *link = dc->links[i];
1518 if (!is_optimized_init_done)
1519 link->link_enc->funcs->hw_init(link->link_enc);
1521 /* Check for enabled DIG to identify enabled display */
1522 if (link->link_enc->funcs->is_dig_enabled &&
1523 link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1524 link->link_status.link_active = true;
1525 if (link->link_enc->funcs->fec_is_active &&
1526 link->link_enc->funcs->fec_is_active(link->link_enc))
1527 link->fec_state = dc_link_fec_enabled;
1531 /* we want to turn off all dp displays before doing detection */
1532 dc_link_blank_all_dp_displays(dc);
1534 if (hws->funcs.enable_power_gating_plane)
1535 hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1537 /* If taking control over from VBIOS, we may want to optimize our first
1538 * mode set, so we need to skip powering down pipes until we know which
1539 * pipes we want to use.
1540 * Otherwise, if taking control is not possible, we need to power
1543 if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
1544 if (!is_optimized_init_done) {
1545 hws->funcs.init_pipes(dc, dc->current_state);
1546 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1547 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1548 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1552 if (!is_optimized_init_done) {
1554 for (i = 0; i < res_pool->audio_count; i++) {
1555 struct audio *audio = res_pool->audios[i];
1557 audio->funcs->hw_init(audio);
1560 for (i = 0; i < dc->link_count; i++) {
1561 struct dc_link *link = dc->links[i];
/* Panel control init may report the current backlight level. */
1563 if (link->panel_cntl)
1564 backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1568 abm->funcs->abm_init(abm, backlight);
1570 if (dmcu != NULL && !dmcu->auto_load_dmcu)
1571 dmcu->funcs->dmcu_init(dmcu);
1574 if (abm != NULL && dmcu != NULL)
1575 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1577 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1578 if (!is_optimized_init_done)
1579 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1581 if (!dc->debug.disable_clock_gate) {
1582 /* enable all DCN clock gating */
1583 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1585 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1587 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1590 if (dc->clk_mgr->funcs->notify_wm_ranges)
1591 dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1594 /* In headless boot cases, DIG may be turned
1595 * on which causes HW/SW discrepancies.
1596 * To avoid this, power down hardware on boot
1597 * if DIG is turned on
1599 void dcn10_power_down_on_boot(struct dc *dc)
1601 struct dc_link *edp_links[MAX_NUM_EDP];
1602 struct dc_link *edp_link = NULL;
1606 get_edp_links(dc, edp_links, &edp_num);
1608 edp_link = edp_links[0];
/* eDP path: turn backlight off before full power down and eDP panel off. */
1610 if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1611 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1612 dc->hwseq->funcs.edp_backlight_control &&
1613 dc->hwss.power_down &&
1614 dc->hwss.edp_power_control) {
1615 dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1616 dc->hwss.power_down(dc);
1617 dc->hwss.edp_power_control(edp_link, false);
/* Non-eDP path: power down as soon as any link's DIG is found enabled. */
1619 for (i = 0; i < dc->link_count; i++) {
1620 struct dc_link *link = dc->links[i];
1622 if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1623 link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1624 dc->hwss.power_down) {
1625 dc->hwss.power_down(dc);
1633 * Call update_clocks with empty context
1634 * to send DISPLAY_OFF
1635 * Otherwise DISPLAY_OFF may not be asserted
1637 if (dc->clk_mgr->funcs->set_low_power_state)
1638 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr)
/* Reset back ends for pipes whose stream went away or needs reprogramming
 * in the new state. Iterates pipes in reverse so bottom pipes are handled
 * before their top pipes; skips non-top pipes and pipes with no stream.
 */
1641 void dcn10_reset_hw_ctx_wrap(
1643 struct dc_state *context)
1646 struct dce_hwseq *hws = dc->hwseq;
1649 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1650 struct pipe_ctx *pipe_ctx_old =
1651 &dc->current_state->res_ctx.pipe_ctx[i];
1652 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1654 if (!pipe_ctx_old->stream)
1657 if (pipe_ctx_old->top_pipe)
1660 if (!pipe_ctx->stream ||
1661 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1662 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1664 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1665 if (hws->funcs.enable_stream_gating)
1666 hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
/* Power down the clock source the old pipe was using. */
1668 old_clk->funcs->cs_power_down(old_clk);
/* For side-by-side / top-and-bottom stereo, the secondary (bottom) split
 * pipe must scan out the right-eye surface: swap left_addr to right_addr,
 * returning the saved left address via *addr so the caller can restore it.
 * Also coerces non-stereo addresses into GRPH_STEREO form when the stream
 * is in a 3D view format.
 */
1673 static bool patch_address_for_sbs_tb_stereo(
1674 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1676 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
/* Secondary split pipe shares the same plane_state as its top pipe. */
1677 bool sec_split = pipe_ctx->top_pipe &&
1678 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1679 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1680 (pipe_ctx->stream->timing.timing_3d_format ==
1681 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1682 pipe_ctx->stream->timing.timing_3d_format ==
1683 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1684 *addr = plane_state->address.grph_stereo.left_addr;
1685 plane_state->address.grph_stereo.left_addr =
1686 plane_state->address.grph_stereo.right_addr;
1689 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1690 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
/* Mirror the mono surface into both eyes of a stereo address. */
1691 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1692 plane_state->address.grph_stereo.right_addr =
1693 plane_state->address.grph_stereo.left_addr;
1694 plane_state->address.grph_stereo.right_meta_addr =
1695 plane_state->address.grph_stereo.left_meta_addr;
/* Program the plane's surface address into the HUBP (flip), applying the
 * stereo address patch if needed and restoring it afterwards. Updates the
 * plane's requested/current address status.
 */
1701 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1703 bool addr_patched = false;
1704 PHYSICAL_ADDRESS_LOC addr;
1705 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1707 if (plane_state == NULL)
1710 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1712 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1713 pipe_ctx->plane_res.hubp,
1714 &plane_state->address,
1715 plane_state->flip_immediate);
1717 plane_state->status.requested_address = plane_state->address;
/* Immediate flips take effect right away, so current == requested. */
1719 if (plane_state->flip_immediate)
1720 plane_state->status.current_address = plane_state->address;
/* Undo the stereo left/right swap done by the patch helper. */
1723 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
/* Program the DPP input (degamma) transfer function for a plane: an input
 * LUT when a usable gamma_correction is present, then degamma mode chosen
 * from the plane's in_transfer_func (predefined HW curves, a user PWL for
 * PQ/distributed points, or bypass).
 */
1726 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1727 const struct dc_plane_state *plane_state)
1729 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1730 const struct dc_transfer_func *tf = NULL;
1733 if (dpp_base == NULL)
1736 if (plane_state->in_transfer_func)
1737 tf = plane_state->in_transfer_func;
/* Program the input LUT only for formats that use one and when the
 * gamma is neither identity nor overridden by always_use_regamma. */
1739 if (plane_state->gamma_correction &&
1740 !dpp_base->ctx->dc->debug.always_use_regamma
1741 && !plane_state->gamma_correction->is_identity
1742 && dce_use_lut(plane_state->format))
1743 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1746 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1747 else if (tf->type == TF_TYPE_PREDEFINED) {
1749 case TRANSFER_FUNCTION_SRGB:
1750 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1752 case TRANSFER_FUNCTION_BT709:
1753 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1755 case TRANSFER_FUNCTION_LINEAR:
1756 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
/* PQ has no fixed-function HW curve: translate to a user PWL. */
1758 case TRANSFER_FUNCTION_PQ:
1759 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1760 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1761 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1768 } else if (tf->type == TF_TYPE_BYPASS) {
1769 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1771 cm_helper_translate_curve_to_degamma_hw_format(tf,
1772 &dpp_base->degamma_params);
1773 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1774 &dpp_base->degamma_params);
#define MAX_NUM_HW_POINTS 0x200
/* Dump a transfer function's points to the gamma logs: red values for the
 * hw_points_num HW points via DC_LOG_GAMMA, green/blue via the all-channels
 * log, and the remaining non-HW points via the all-points log.
 */
1783 static void log_tf(struct dc_context *ctx,
1784 struct dc_transfer_func *tf, uint32_t hw_points_num)
1786 // DC_LOG_GAMMA is default logging of all hw points
1787 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1788 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1791 DC_LOGGER_INIT(ctx->logger);
1792 DC_LOG_GAMMA("Gamma Correction TF");
1793 DC_LOG_ALL_GAMMA("Logging all tf points...");
1794 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1796 for (i = 0; i < hw_points_num; i++) {
1797 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1798 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1799 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1802 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1803 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1804 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1805 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/* Program the DPP output (regamma) transfer function for a stream: the
 * fixed sRGB HW curve when possible, otherwise a user PWL translated from
 * out_transfer_func, falling back to bypass. Logs the resulting curve.
 */
1809 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1810 const struct dc_stream_state *stream)
1812 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1817 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
/* sRGB predefined curve uses the dedicated HW regamma, no PWL needed. */
1819 if (stream->out_transfer_func &&
1820 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1821 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1822 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1824 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1827 else if (cm_helper_translate_curve_to_hw_format(
1828 stream->out_transfer_func,
1829 &dpp->regamma_params, false)) {
1830 dpp->funcs->dpp_program_regamma_pwl(
1832 &dpp->regamma_params, OPP_REGAMMA_USER);
1834 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1836 if (stream != NULL && stream->ctx != NULL &&
1837 stream->out_transfer_func != NULL) {
1839 stream->out_transfer_func,
1840 dpp->regamma_params.hw_points_num);
/* Lock or unlock pipe programming via the TG master update lock. Only the
 * top pipe of a tree needs locking; sanity checks bracket the operation
 * when enabled.
 */
1846 void dcn10_pipe_control_lock(
1848 struct pipe_ctx *pipe,
1851 struct dce_hwseq *hws = dc->hwseq;
1853 /* use TG master update lock to lock everything on the TG
1854 * therefore only top pipe need to lock
1856 if (!pipe || pipe->top_pipe)
1859 if (dc->debug.sanity_checks)
1860 hws->funcs.verify_allow_pstate_change_high(dc);
1863 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1865 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1867 if (dc->debug.sanity_checks)
1868 hws->funcs.verify_allow_pstate_change_high(dc);
1872 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1874 * Software keepout workaround to prevent cursor update locking from stalling
1875 * out cursor updates indefinitely or from old values from being retained in
1876 * the case where the viewport changes in the same frame as the cursor.
1878 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1879 * too close to VUPDATE, then stall out until VUPDATE finishes.
1881 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1882 * to avoid the need for this workaround.
1884 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1886 struct dc_stream_state *stream = pipe_ctx->stream;
1887 struct crtc_position position;
1888 uint32_t vupdate_start, vupdate_end;
1889 unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1890 unsigned int us_per_line, us_vupdate;
1892 if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1895 if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1898 dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1901 dc->hwss.get_position(&pipe_ctx, 1, &position);
1902 vpos = position.vertical_count;
1904 /* Avoid wraparound calculation issues */
1905 vupdate_start += stream->timing.v_total;
1906 vupdate_end += stream->timing.v_total;
1907 vpos += stream->timing.v_total;
1909 if (vpos <= vupdate_start) {
1910 /* VPOS is in VACTIVE or back porch. */
1911 lines_to_vupdate = vupdate_start - vpos;
1912 } else if (vpos > vupdate_end) {
1913 /* VPOS is in the front porch. */
1916 /* VPOS is in VUPDATE. */
1917 lines_to_vupdate = 0;
1920 /* Calculate time until VUPDATE in microseconds. */
1922 stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1923 us_to_vupdate = lines_to_vupdate * us_per_line;
1925 /* 70 us is a conservative estimate of cursor update time*/
1926 if (us_to_vupdate > 70)
1929 /* Stall out until the cursor update completes. */
1930 if (vupdate_end < vupdate_start)
1931 vupdate_end += stream->timing.v_total;
1932 us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
/* Busy-wait through the remaining time to VUPDATE plus its duration. */
1933 udelay(us_to_vupdate + us_vupdate);
/* Acquire or release the cursor lock for a pipe's MPCC tree, routing the
 * request through the DMUB HW lock manager when the link uses it, otherwise
 * through the MPC directly. Delays locking near VUPDATE to avoid stalls.
 */
1936 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1938 /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1939 if (!pipe || pipe->top_pipe)
1942 /* Prevent cursor lock from stalling out cursor updates. */
1944 delay_cursor_until_vupdate(dc, pipe);
1946 if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1947 union dmub_hw_lock_flags hw_locks = { 0 };
1948 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1950 hw_locks.bits.lock_cursor = 1;
1951 inst_flags.opp_inst = pipe->stream_res.opp->inst;
1953 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1958 dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1959 pipe->stream_res.opp->inst, lock);
/* Poll the timing generator for a triggered reset, waiting one full frame
 * (VACTIVE then VBLANK) per iteration, up to a bounded number of frames.
 * Logs an error and gives up on timeout or if the TG counter is stuck.
 */
1962 static bool wait_for_reset_trigger_to_occur(
1963 struct dc_context *dc_ctx,
1964 struct timing_generator *tg)
1968 /* To avoid endless loop we wait at most
1969 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1970 const uint32_t frames_to_wait_on_triggered_reset = 10;
1973 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
/* A stalled counter means waiting on frame states would hang. */
1975 if (!tg->funcs->is_counter_moving(tg)) {
1976 DC_ERROR("TG counter is not moving!\n");
1980 if (tg->funcs->did_triggered_reset_occur(tg)) {
1982 /* usually occurs at i=1 */
1983 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1988 /* Wait for one frame. */
1989 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1990 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1994 DC_ERROR("GSL: Timeout on reset trigger!\n");
/* Reduce a numerator/denominator pair in place by dividing out common
 * prime factors (from a fixed table of primes < 1000). When
 * checkUint32Bounary is set, success additionally requires both values to
 * fit in 32 bits; reduction stops early once they do.
 */
1999 static uint64_t reduceSizeAndFraction(uint64_t *numerator,
2000 uint64_t *denominator,
2001 bool checkUint32Bounary)
/* Without the 32-bit requirement the reduction always "succeeds". */
2004 bool ret = checkUint32Bounary == false;
2005 uint64_t max_int32 = 0xffffffff;
2006 uint64_t num, denom;
2007 static const uint16_t prime_numbers[] = {
2008 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
2009 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
2010 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
2011 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
2012 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
2013 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
2014 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
2015 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
2016 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
2017 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
2018 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
2019 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
2020 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
2021 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
2022 941, 947, 953, 967, 971, 977, 983, 991, 997};
2023 int count = ARRAY_SIZE(prime_numbers);
2026 denom = *denominator;
2027 for (i = 0; i < count; i++) {
2028 uint32_t num_remainder, denom_remainder;
2029 uint64_t num_result, denom_result;
/* Early exit: both values already fit in 32 bits, goal reached. */
2030 if (checkUint32Bounary &&
2031 num <= max_int32 && denom <= max_int32) {
/* Repeatedly divide out this prime while it divides both values. */
2036 num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
2037 denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
2038 if (num_remainder == 0 && denom_remainder == 0) {
2040 denom = denom_result;
2042 } while (num_remainder == 0 && denom_remainder == 0);
2045 *denominator = denom;
/* True when the pipe's stream refreshes at 30 Hz or less (integer math:
 * pix_clk_100hz * 100 / h_total / v_total gives Hz).
 */
2049 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2051 uint32_t master_pipe_refresh_rate =
2052 pipe->stream->timing.pix_clk_100hz * 100 /
2053 pipe->stream->timing.h_total /
2054 pipe->stream->timing.v_total;
2055 return master_pipe_refresh_rate <= 30;
/* Compute the effective pixel clock divider for a pipe, accounting for
 * YCbCr 4:2:0 encoding, the number of ODM-combined pipes, and (optionally)
 * low-refresh-rate streams.
 */
2058 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2059 bool account_low_refresh_rate)
2061 uint32_t clock_divider = 1;
2062 uint32_t numpipes = 1;
2064 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2067 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
/* Count pipes chained via ODM combine. */
2070 while (pipe->next_odm_pipe) {
2071 pipe = pipe->next_odm_pipe;
2074 clock_divider *= numpipes;
2076 return clock_divider;
/* Align the pixel clock DTOs of a synchronization group so their vblanks
 * can be aligned. Derives DTO phase/modulo for each pipe from the embedded
 * panel's timing (vblank_alignment_dto_params) and the DP reference clock,
 * reduces the fractions, and overrides each non-embedded pipe's DP pixel
 * clock. hw_crtc_timing is heap-allocated because MAX_PIPES entries would
 * be too large for the stack.
 */
2079 static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
2080 struct pipe_ctx *grouped_pipes[])
2082 struct dc_context *dc_ctx = dc->ctx;
2083 int i, master = -1, embedded = -1;
2084 struct dc_crtc_timing *hw_crtc_timing;
2085 uint64_t phase[MAX_PIPES];
2086 uint64_t modulo[MAX_PIPES];
2089 uint32_t embedded_pix_clk_100hz;
2090 uint16_t embedded_h_total;
2091 uint16_t embedded_v_total;
2092 uint32_t dp_ref_clk_100hz =
2093 dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
2095 hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
2096 if (!hw_crtc_timing)
/* Unpack the embedded panel's h_total/v_total/pixel clock from the
 * packed 64-bit vblank_alignment_dto_params config value. */
2099 if (dc->config.vblank_alignment_dto_params &&
2100 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
2102 (dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2104 (dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2105 embedded_pix_clk_100hz =
2106 dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
2108 for (i = 0; i < group_size; i++) {
2109 grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2110 grouped_pipes[i]->stream_res.tg,
2111 &hw_crtc_timing[i]);
2112 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2113 dc->res_pool->dp_clock_source,
2114 grouped_pipes[i]->stream_res.tg->inst,
2116 hw_crtc_timing[i].pix_clk_100hz = pclk;
2117 if (dc_is_embedded_signal(
2118 grouped_pipes[i]->stream->signal)) {
2121 phase[i] = embedded_pix_clk_100hz*100;
2122 modulo[i] = dp_ref_clk_100hz*100;
/* Non-embedded pipes: scale by the frame sizes so all DTOs tick at
 * rates that keep the vblanks alignable. */
2125 phase[i] = (uint64_t)embedded_pix_clk_100hz*
2126 hw_crtc_timing[i].h_total*
2127 hw_crtc_timing[i].v_total;
2128 phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2129 modulo[i] = (uint64_t)dp_ref_clk_100hz*
2133 if (reduceSizeAndFraction(&phase[i],
2134 &modulo[i], true) == false) {
2136 * this will help to stop reporting
2137 * this timing synchronizable
2139 DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2140 grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
2145 for (i = 0; i < group_size; i++) {
2146 if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2147 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2148 dc->res_pool->dp_clock_source,
2149 grouped_pipes[i]->stream_res.tg->inst,
2150 phase[i], modulo[i]);
/* Read back the overridden clock and record the effective stream
 * pixel clock (undo the divider applied above). */
2151 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2152 dc->res_pool->dp_clock_source,
2153 grouped_pipes[i]->stream_res.tg->inst, &pclk);
2154 grouped_pipes[i]->stream->timing.pix_clk_100hz =
2155 pclk*get_clock_divider(grouped_pipes[i], false);
2163 kfree(hw_crtc_timing);
/*
 * Synchronize vblanks across a group of pipes: align DP DTOs first via
 * dcn10_align_pixel_clocks(), then have every non-master OTG align its
 * vblank against the master OTG.
 */
2167 void dcn10_enable_vblanks_synchronization(
2171 struct pipe_ctx *grouped_pipes[])
2173 struct dc_context *dc_ctx = dc->ctx;
2174 struct output_pixel_processor *opp;
2175 struct timing_generator *tg;
2176 int i, width, height, master;
/* Temporarily stretch the DPG dimensions (2*height + 1) on the slave
 * pipes while vblanks are being moved; the real dimensions are restored
 * at the end of this function.
 */
2178 for (i = 1; i < group_size; i++) {
2179 opp = grouped_pipes[i]->stream_res.opp;
2180 tg = grouped_pipes[i]->stream_res.tg;
2181 tg->funcs->get_otg_active_size(tg, &width, &height);
2182 if (opp->funcs->opp_program_dpg_dimensions)
2183 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Reset per-stream sync state before (re)synchronizing the group */
2186 for (i = 0; i < group_size; i++) {
2187 if (grouped_pipes[i]->stream == NULL)
2189 grouped_pipes[i]->stream->vblank_synchronized = false;
2190 grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2193 DC_SYNC_INFO("Aligning DP DTOs\n");
2195 master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2197 DC_SYNC_INFO("Synchronizing VBlanks\n");
/* Each slave TG aligns its vblank to the master TG, compensating for
 * differing pixel clocks and clock dividers.
 */
2200 for (i = 0; i < group_size; i++) {
2201 if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2202 grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2203 grouped_pipes[master]->stream_res.tg,
2204 grouped_pipes[i]->stream_res.tg,
2205 grouped_pipes[master]->stream->timing.pix_clk_100hz,
2206 grouped_pipes[i]->stream->timing.pix_clk_100hz,
2207 get_clock_divider(grouped_pipes[master], false),
2208 get_clock_divider(grouped_pipes[i], false));
2209 grouped_pipes[i]->stream->vblank_synchronized = true;
2211 grouped_pipes[master]->stream->vblank_synchronized = true;
2212 DC_SYNC_INFO("Sync complete\n");
/* Restore the real DPG dimensions on the slave pipes */
2215 for (i = 1; i < group_size; i++) {
2216 opp = grouped_pipes[i]->stream_res.opp;
2217 tg = grouped_pipes[i]->stream_res.tg;
2218 tg->funcs->get_otg_active_size(tg, &width, &height);
2219 if (opp->funcs->opp_program_dpg_dimensions)
2220 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
/*
 * Enable timing synchronization across a pipe group: arm every slave OTG
 * to reset off the first pipe's OTG, wait for the reset trigger to fire,
 * then disarm the triggers. The group starts timing-aligned afterwards.
 */
2224 void dcn10_enable_timing_synchronization(
2228 struct pipe_ctx *grouped_pipes[])
2230 struct dc_context *dc_ctx = dc->ctx;
2231 struct output_pixel_processor *opp;
2232 struct timing_generator *tg;
2233 int i, width, height;
2235 DC_SYNC_INFO("Setting up OTG reset trigger\n");
/* Temporarily stretch the slave DPG dimensions (2*height + 1) while the
 * OTGs are being reset; real dimensions are restored below.
 */
2237 for (i = 1; i < group_size; i++) {
2238 opp = grouped_pipes[i]->stream_res.opp;
2239 tg = grouped_pipes[i]->stream_res.tg;
2240 tg->funcs->get_otg_active_size(tg, &width, &height);
2241 if (opp->funcs->opp_program_dpg_dimensions)
2242 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Clear per-stream sync state before resynchronizing */
2245 for (i = 0; i < group_size; i++) {
2246 if (grouped_pipes[i]->stream == NULL)
2248 grouped_pipes[i]->stream->vblank_synchronized = false;
/* Arm every slave TG to reset off pipe 0's OTG instance */
2251 for (i = 1; i < group_size; i++)
2252 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2253 grouped_pipes[i]->stream_res.tg,
2254 grouped_pipes[0]->stream_res.tg->inst);
2256 DC_SYNC_INFO("Waiting for trigger\n");
2258 /* Need to get only check 1 pipe for having reset as all the others are
2259 * synchronized. Look at last pipe programmed to reset.
2262 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2263 for (i = 1; i < group_size; i++)
2264 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2265 grouped_pipes[i]->stream_res.tg);
/* Restore the real DPG dimensions on the slave pipes */
2267 for (i = 1; i < group_size; i++) {
2268 opp = grouped_pipes[i]->stream_res.opp;
2269 tg = grouped_pipes[i]->stream_res.tg;
2270 tg->funcs->get_otg_active_size(tg, &width, &height);
2271 if (opp->funcs->opp_program_dpg_dimensions)
2272 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2275 DC_SYNC_INFO("Sync complete\n");
/*
 * Enable per-frame CRTC position reset for multi-display sync: arm each
 * grouped TG (when the hook exists) with its stream's triggered_crtc_reset
 * source, then wait for each TG's reset trigger to occur.
 */
2278 void dcn10_enable_per_frame_crtc_position_reset(
2281 struct pipe_ctx *grouped_pipes[])
2283 struct dc_context *dc_ctx = dc->ctx;
2286 DC_SYNC_INFO("Setting up\n");
2287 for (i = 0; i < group_size; i++)
2288 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2289 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2290 grouped_pipes[i]->stream_res.tg,
2292 &grouped_pipes[i]->stream->triggered_crtc_reset);
2294 DC_SYNC_INFO("Waiting for trigger\n");
/* Unlike timing sync, every pipe is waited on individually here */
2296 for (i = 0; i < group_size; i++)
2297 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2299 DC_SYNC_INFO("Multi-display sync is complete\n");
/*
 * Read the MC VM system aperture settings (default physical page and the
 * low/high logical address bounds) from HW registers into @apt, for later
 * programming into the HUBP by dcn10_program_pte_vm().
 */
2302 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2303 struct vm_system_aperture_param *apt,
2304 struct dce_hwseq *hws)
2306 PHYSICAL_ADDRESS_LOC physical_page_number;
2307 uint32_t logical_addr_low;
2308 uint32_t logical_addr_high;
2310 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2311 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2312 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2313 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2315 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2316 LOGICAL_ADDR, &logical_addr_low);
2318 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2319 LOGICAL_ADDR, &logical_addr_high);
/* Registers hold page numbers, not byte addresses: the default address
 * is in 4K pages (<<12); the low/high bounds use an 18-bit shift per
 * the register encoding.
 */
2321 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
2322 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
2323 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
2326 /* Temporary read settings, future will get values from kmd directly */
/*
 * Read GPUVM context-0 page-table settings (PTE base/start/end and the
 * L2 protection-fault default address) from HW into @vm0, then translate
 * the PTE base into DCN address space using the SDPIF FB base/offset.
 */
2327 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2328 struct vm_context0_param *vm0,
2329 struct dce_hwseq *hws)
2331 PHYSICAL_ADDRESS_LOC fb_base;
2332 PHYSICAL_ADDRESS_LOC fb_offset;
2333 uint32_t fb_base_value;
2334 uint32_t fb_offset_value;
2336 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2337 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2339 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2340 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2341 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2342 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2344 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2345 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2346 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2347 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2349 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2350 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2351 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2352 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2354 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2355 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2356 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2357 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2360 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2361 * Therefore we need to do
2362 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2363 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
/* FB base/offset register values are in 16MB units (<<24) */
2365 fb_base.quad_part = (uint64_t)fb_base_value << 24;
2366 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2367 vm0->pte_base.quad_part += fb_base.quad_part;
2368 vm0->pte_base.quad_part -= fb_offset.quad_part;
/*
 * Program the HUBP's GPUVM state: read the system aperture and context-0
 * page-table settings back from MMHUB, then hand them to the HUBP hooks.
 */
2372 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2374 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2375 struct vm_system_aperture_param apt = {0};
2376 struct vm_context0_param vm0 = {0};
2378 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2379 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2381 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2382 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
/*
 * Power up and clock a plane's HUBP/OPP path before it is programmed:
 * undo the DEGVIDCN10_253 WA, power on the plane, enable HUBP/OPP clocks,
 * optionally set up GPUVM, and re-arm the flip interrupt on the top pipe.
 * Bracketed by pstate sanity checks in debug builds.
 */
2385 static void dcn10_enable_plane(
2387 struct pipe_ctx *pipe_ctx,
2388 struct dc_state *context)
2390 struct dce_hwseq *hws = dc->hwseq;
2392 if (dc->debug.sanity_checks) {
2393 hws->funcs.verify_allow_pstate_change_high(dc);
/* Revert the DEGVIDCN10_253 workaround before powering the plane */
2396 undo_DEGVIDCN10_253_wa(dc);
2398 power_on_plane(dc->hwseq,
2399 pipe_ctx->plane_res.hubp->inst);
2401 /* enable DCFCLK current DCHUB */
2402 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2404 /* make sure OPP_PIPE_CLOCK_EN = 1 */
2405 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2406 pipe_ctx->stream_res.opp,
2409 if (dc->config.gpu_vm_support)
2410 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2412 if (dc->debug.sanity_checks) {
2413 hws->funcs.verify_allow_pstate_change_high(dc);
/* Re-arm the flip interrupt only on a top pipe whose plane wants it */
2416 if (!pipe_ctx->top_pipe
2417 && pipe_ctx->plane_state
2418 && pipe_ctx->plane_state->flip_int_enabled
2419 && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2420 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
/*
 * Program the DPP gamut remap (temperature) matrix for a pipe. A
 * stream-level remap takes precedence over a plane-level remap; when
 * neither is enabled the adjustment is left in bypass.
 */
2424 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2427 struct dpp_grph_csc_adjustment adjust;
2428 memset(&adjust, 0, sizeof(adjust));
2429 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2432 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2433 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2434 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2435 adjust.temperature_matrix[i] =
2436 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2437 } else if (pipe_ctx->plane_state &&
2438 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2439 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2440 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2441 adjust.temperature_matrix[i] =
2442 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
/* Always programmed, even in bypass, so stale remaps are cleared */
2445 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
/*
 * Return true when this pipe is a rear MPO plane (layer_index > 0) in an
 * RGB colorspace whose front (layer 0) plane is visible at the top of the
 * pipe chain — i.e. when the OCSC RGB-bias MPO fix must be applied (see
 * dcn10_program_output_csc()).
 */
2449 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2451 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2452 if (pipe_ctx->top_pipe) {
2453 struct pipe_ctx *top = pipe_ctx->top_pipe;
2455 while (top->top_pipe)
2456 top = top->top_pipe; // Traverse to top pipe_ctx
2457 if (top->plane_state && top->plane_state->layer_index == 0)
2458 return true; // Front MPO plane not hidden
/*
 * Program the OCSC matrix for a rear MPO plane with its RGB bias terms
 * overridden, then restore the caller's matrix[3/7/11] to their original
 * shared bias value so the caller's copy is unchanged. NOTE(review): the
 * lines that zero the bias entries before dpp_set_csc_adjustment are not
 * visible in this chunk.
 */
2464 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2466 // Override rear plane RGB bias to fix MPO brightness
2467 uint16_t rgb_bias = matrix[3];
2472 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2473 matrix[3] = rgb_bias;
2474 matrix[7] = rgb_bias;
2475 matrix[11] = rgb_bias;
/*
 * Program the output CSC for a pipe: when the stream supplies an
 * adjustment matrix, program it (with the rear-plane RGB-bias fix for
 * MPO on DCN1 when required); otherwise fall back to the colorspace
 * default CSC.
 */
2478 void dcn10_program_output_csc(struct dc *dc,
2479 struct pipe_ctx *pipe_ctx,
2480 enum dc_color_space colorspace,
2484 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2485 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2487 /* MPO is broken with RGB colorspaces when OCSC matrix
2488 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2489 * Blending adds offsets from front + rear to rear plane
2491 * Fix is to set RGB bias to 0 on rear plane, top plane
2492 * black value pixels add offset instead of rear + front
2495 int16_t rgb_bias = matrix[3];
2496 // matrix[3/7/11] are all the same offset value
2498 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2499 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2501 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2505 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2506 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
/*
 * Program per-plane DPP input state: format/input-CSC setup, then the
 * bias-and-scale (prescale) registers when the DPP implements them.
 */
2510 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2512 struct dc_bias_and_scale bns_params = {0};
2514 // program the input csc
2515 dpp->funcs->dpp_setup(dpp,
2516 plane_state->format,
2517 EXPANSION_MODE_ZERO,
2518 plane_state->input_csc_color_matrix,
2519 plane_state->color_space,
2522 //set scale and bias registers
2523 build_prescale_params(&bns_params, plane_state);
2524 if (dpp->funcs->dpp_program_bias_and_scale)
2525 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
/*
 * Pick the visual-confirm debug color for this pipe (HDR / surface /
 * swizzle modes), defaulting to the stream's black color otherwise, and
 * program it as the MPCC background color when the MPC supports it.
 */
2528 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2530 struct mpc *mpc = dc->res_pool->mpc;
2532 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2533 get_hdr_visual_confirm_color(pipe_ctx, color);
2534 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2535 get_surface_visual_confirm_color(pipe_ctx, color);
2536 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2537 get_surface_tile_visual_confirm_color(pipe_ctx, color);
2539 color_space_to_black_color(
2540 dc, pipe_ctx->stream->output_color_space, color);
2542 if (mpc->funcs->set_bg_color)
2543 mpc->funcs->set_bg_color(mpc, color, mpcc_id);
/*
 * (Re)configure MPCC blending for a pipe's plane: build the blend config
 * (per-pixel vs global alpha), then either update blending in place for a
 * non-full update, or remove and re-insert this plane's MPCC in the MPC
 * tree for a full update.
 */
2546 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2548 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2549 struct mpcc_blnd_cfg blnd_cfg = {0};
/* Per-pixel alpha only matters when another pipe blends underneath */
2550 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2552 struct mpcc *new_mpcc;
2553 struct mpc *mpc = dc->res_pool->mpc;
2554 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2556 blnd_cfg.overlap_only = false;
2557 blnd_cfg.global_gain = 0xff;
2559 if (per_pixel_alpha) {
2560 /* DCN1.0 has output CM before MPC which seems to screw with
2561 * pre-multiplied alpha.
2563 blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
2564 pipe_ctx->stream->output_color_space)
2565 && pipe_ctx->plane_state->pre_multiplied_alpha);
2566 if (pipe_ctx->plane_state->global_alpha) {
2567 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
2568 blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
2570 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2573 blnd_cfg.pre_multiplied_alpha = false;
2574 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2577 if (pipe_ctx->plane_state->global_alpha)
2578 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2580 blnd_cfg.global_alpha = 0xff;
2584 * Note: currently there is a bug in init_hw such that
2585 * on resume from hibernate, BIOS sets up MPCC0, and
2586 * we do mpcc_remove but the mpcc cannot go to idle
2587 * after remove. This cause us to pick mpcc1 here,
2588 * which causes a pstate hang for yet unknown reason.
/* MPCC instance is mapped 1:1 to the HUBP instance */
2590 mpcc_id = hubp->inst;
2592 /* If there is no full update, don't need to touch MPC tree*/
2593 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2594 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2595 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2599 /* check if this MPCC is already being used */
2600 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2601 /* remove MPCC if being used */
2602 if (new_mpcc != NULL)
2603 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2605 if (dc->debug.sanity_checks)
2606 mpc->funcs->assert_mpcc_idle_before_connect(
2607 dc->res_pool->mpc, mpcc_id);
2609 /* Call MPC to insert new plane */
2610 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2617 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2619 ASSERT(new_mpcc != NULL);
/* Record the OPP/MPCC binding on the HUBP for later teardown */
2620 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2621 hubp->mpcc_id = mpcc_id;
/*
 * Program the DPP scaler from the pipe's precomputed scl_data. Line-buffer
 * alpha is only enabled when per-pixel alpha blending is actually in use
 * (plane has alpha AND a pipe blends beneath it); LB depth is fixed 36bpp.
 */
2624 static void update_scaler(struct pipe_ctx *pipe_ctx)
2626 bool per_pixel_alpha =
2627 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2629 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2630 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2631 /* scaler configuration */
2632 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2633 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
/*
 * Per-plane HUBP + DPP programming, driven by plane_state->update_flags:
 * DPP clock divider/DTO handling on full update, VTG selection and
 * DLG/TTU setup, then conditional input-CSC, MPCC, scaler, viewport,
 * cursor, gamut/OCSC and surface-config programming. Finally updates the
 * plane address and unblanks the HUBP if the pipe tree is visible.
 */
2636 static void dcn10_update_dchubp_dpp(
2638 struct pipe_ctx *pipe_ctx,
2639 struct dc_state *context)
2641 struct dce_hwseq *hws = dc->hwseq;
2642 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2643 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2644 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2645 struct plane_size size = plane_state->plane_size;
2646 unsigned int compat_level = 0;
2647 bool should_divided_by_2 = false;
2649 /* depends on DML calculation, DPP clock value may change dynamically */
2650 /* If request max dpp clk is lower than current dispclk, no need to
2653 if (plane_state->update_flags.bits.full_update) {
2655 /* new calculated dispclk, dppclk are stored in
2656 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2657 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2658 * dcn10_validate_bandwidth compute new dispclk, dppclk.
2659 * dispclk will put in use after optimize_bandwidth when
2660 * ramp_up_dispclk_with_dpp is called.
2661 * there are two places for dppclk be put in use. One location
2662 * is the same as the location as dispclk. Another is within
2663 * update_dchubp_dpp which happens between pre_bandwidth and
2664 * optimize_bandwidth.
2665 * dppclk updated within update_dchubp_dpp will cause new
2666 * clock values of dispclk and dppclk not be in use at the same
2667 * time. when clocks are decreased, this may cause dppclk is
2668 * lower than previous configuration and let pipe stuck.
2669 * for example, eDP + external dp, change resolution of DP from
2670 * 1920x1080x144hz to 1280x960x60hz.
2671 * before change: dispclk = 337889 dppclk = 337889
2672 * change mode, dcn10_validate_bandwidth calculate
2673 * dispclk = 143122 dppclk = 143122
2674 * update_dchubp_dpp be executed before dispclk be updated,
2675 * dispclk = 337889, but dppclk use new value dispclk /2 =
2676 * 168944. this will cause pipe pstate warning issue.
2677 * solution: between pre_bandwidth and optimize_bandwidth, while
2678 * dispclk is going to be decreased, keep dppclk = dispclk
2680 if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2681 dc->clk_mgr->clks.dispclk_khz)
2682 should_divided_by_2 = false;
2684 should_divided_by_2 =
2685 context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2686 dc->clk_mgr->clks.dispclk_khz / 2;
2688 dpp->funcs->dpp_dppclk_control(
2690 should_divided_by_2,
2693 if (dc->res_pool->dccg)
2694 dc->res_pool->dccg->funcs->update_dpp_dto(
2697 pipe_ctx->plane_res.bw.dppclk_khz);
/* Mirror the dppclk actually chosen into the clk_mgr state */
2699 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2700 dc->clk_mgr->clks.dispclk_khz / 2 :
2701 dc->clk_mgr->clks.dispclk_khz;
2704 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2705 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2706 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2708 if (plane_state->update_flags.bits.full_update) {
2709 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2711 hubp->funcs->hubp_setup(
2713 &pipe_ctx->dlg_regs,
2714 &pipe_ctx->ttu_regs,
2716 &pipe_ctx->pipe_dlg_param);
2717 hubp->funcs->hubp_setup_interdependent(
2719 &pipe_ctx->dlg_regs,
2720 &pipe_ctx->ttu_regs);
/* The surface size programmed below tracks the scaler viewport */
2723 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2725 if (plane_state->update_flags.bits.full_update ||
2726 plane_state->update_flags.bits.bpp_change)
2727 dcn10_update_dpp(dpp, plane_state);
2729 if (plane_state->update_flags.bits.full_update ||
2730 plane_state->update_flags.bits.per_pixel_alpha_change ||
2731 plane_state->update_flags.bits.global_alpha_change)
2732 hws->funcs.update_mpcc(dc, pipe_ctx);
2734 if (plane_state->update_flags.bits.full_update ||
2735 plane_state->update_flags.bits.per_pixel_alpha_change ||
2736 plane_state->update_flags.bits.global_alpha_change ||
2737 plane_state->update_flags.bits.scaling_change ||
2738 plane_state->update_flags.bits.position_change) {
2739 update_scaler(pipe_ctx);
2742 if (plane_state->update_flags.bits.full_update ||
2743 plane_state->update_flags.bits.scaling_change ||
2744 plane_state->update_flags.bits.position_change) {
2745 hubp->funcs->mem_program_viewport(
2747 &pipe_ctx->plane_res.scl_data.viewport,
2748 &pipe_ctx->plane_res.scl_data.viewport_c);
/* Re-apply cursor state when attributes were ever set for this stream */
2751 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2752 dc->hwss.set_cursor_position(pipe_ctx);
2753 dc->hwss.set_cursor_attribute(pipe_ctx);
2755 if (dc->hwss.set_cursor_sdr_white_level)
2756 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2759 if (plane_state->update_flags.bits.full_update) {
2761 dc->hwss.program_gamut_remap(pipe_ctx);
2763 dc->hwss.program_output_csc(dc,
2765 pipe_ctx->stream->output_color_space,
2766 pipe_ctx->stream->csc_color_matrix.matrix,
2767 pipe_ctx->stream_res.opp->inst);
2770 if (plane_state->update_flags.bits.full_update ||
2771 plane_state->update_flags.bits.pixel_format_change ||
2772 plane_state->update_flags.bits.horizontal_mirror_change ||
2773 plane_state->update_flags.bits.rotation_change ||
2774 plane_state->update_flags.bits.swizzle_change ||
2775 plane_state->update_flags.bits.dcc_change ||
2776 plane_state->update_flags.bits.bpp_change ||
2777 plane_state->update_flags.bits.scaling_change ||
2778 plane_state->update_flags.bits.plane_size_change) {
2779 hubp->funcs->hubp_program_surface_config(
2781 plane_state->format,
2782 &plane_state->tiling_info,
2784 plane_state->rotation,
2786 plane_state->horizontal_mirror,
2790 hubp->power_gated = false;
2792 hws->funcs.update_plane_addr(dc, pipe_ctx);
2794 if (is_pipe_tree_visible(pipe_ctx))
2795 hubp->funcs->set_blank(hubp, false);
/*
 * Blank or unblank a pipe's pixel data: compute the OTG blank color for
 * the stream's colorspace (with a YCbCr 4:2:0 packing fix-up), program it,
 * then toggle OTG blank and manage ABM level accordingly. NOTE(review):
 * the blank/unblank branch structure is only partially visible in this
 * chunk.
 */
2798 void dcn10_blank_pixel_data(
2800 struct pipe_ctx *pipe_ctx,
2803 enum dc_color_space color_space;
2804 struct tg_color black_color = {0};
2805 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2806 struct dc_stream_state *stream = pipe_ctx->stream;
2808 /* program otg blank color */
2809 color_space = stream->output_color_space;
2810 color_space_to_black_color(dc, color_space, &black_color);
2813 * The way 420 is packed, 2 channels carry Y component, 1 channel
2814 * alternate between Cb and Cr, so both channels need the pixel
2817 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2818 black_color.color_r_cr = black_color.color_g_y;
2821 if (stream_res->tg->funcs->set_blank_color)
2822 stream_res->tg->funcs->set_blank_color(
2827 if (stream_res->tg->funcs->set_blank)
2828 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
/* ABM: restore the stream's abm_level on this path... */
2829 if (stream_res->abm) {
2830 dc->hwss.set_pipe(pipe_ctx);
2831 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
/* ...and immediately disable ABM then blank after VBLANK on this one */
2834 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2835 if (stream_res->tg->funcs->set_blank) {
2836 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2837 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
/*
 * Program the DPP HDR multiplier from the plane's hdr_mult fixed-point
 * value, converted to the HW custom float format (6 exponent bits,
 * 12 mantissa bits). A zero multiplier keeps the 1.0 default (0x1f000).
 */
2842 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2844 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2845 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2846 struct custom_float_format fmt;
2848 fmt.exponenta_bits = 6;
2849 fmt.mantissa_bits = 12;
2853 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2854 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2856 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2857 pipe_ctx->plane_res.dpp, hw_mult);
/*
 * Program one pipe: on the top pipe, OTG-level global sync / VTG /
 * vupdate-interrupt / blank programming; then plane enable on full update,
 * HUBP+DPP programming, HDR multiplier, and the input/output transfer
 * functions as dictated by the plane's update flags.
 */
2860 void dcn10_program_pipe(
2862 struct pipe_ctx *pipe_ctx,
2863 struct dc_state *context)
2865 struct dce_hwseq *hws = dc->hwseq;
/* Only the top pipe owns OTG-level programming */
2867 if (pipe_ctx->top_pipe == NULL) {
2868 bool blank = !is_pipe_tree_visible(pipe_ctx);
2870 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2871 pipe_ctx->stream_res.tg,
2872 pipe_ctx->pipe_dlg_param.vready_offset,
2873 pipe_ctx->pipe_dlg_param.vstartup_start,
2874 pipe_ctx->pipe_dlg_param.vupdate_offset,
2875 pipe_ctx->pipe_dlg_param.vupdate_width);
2877 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2878 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2880 if (hws->funcs.setup_vupdate_interrupt)
2881 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2883 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2886 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2887 dcn10_enable_plane(dc, pipe_ctx, context);
2889 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2891 hws->funcs.set_hdr_multiplier(pipe_ctx);
2893 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2894 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2895 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2896 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2898 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2899 * only do gamma programming for full update.
2900 * TODO: This can be further optimized/cleaned up
2901 * Always call this for now since it does memcmp inside before
2902 * doing heavy calculation and programming
2904 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2905 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/*
 * For every enabled, plane-bearing top pipe, wait for VBLANK and then
 * VACTIVE so that a VUPDATE has occurred and any pending OTG update has
 * fully taken effect before the caller proceeds (e.g. re-locks pipes).
 */
2908 void dcn10_wait_for_pending_cleared(struct dc *dc,
2909 struct dc_state *context)
2911 struct pipe_ctx *pipe_ctx;
2912 struct timing_generator *tg;
2915 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2916 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2917 tg = pipe_ctx->stream_res.tg;
2920 * Only wait for top pipe's tg penindg bit
2921 * Also skip if pipe is disabled.
2923 if (pipe_ctx->top_pipe ||
2924 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2925 !tg->funcs->is_tg_enabled(tg))
2929 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2930 * For some reason waiting for OTG_UPDATE_PENDING cleared
2931 * seems to not trigger the update right away, and if we
2932 * lock again before VUPDATE then we don't get a separated
2935 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2936 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
/*
 * Front-end work performed after pipe locks are released: apply the OPTC
 * underflow workaround on plane-less top pipes, disable every plane
 * flagged for disable, run optimize_bandwidth once if anything was
 * disabled, and apply the DEGVIDCN10_254 watermark workaround.
 */
2940 void dcn10_post_unlock_program_front_end(
2942 struct dc_state *context)
2946 DC_LOGGER_INIT(dc->ctx->logger);
2948 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2949 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* Only top, non-ODM-secondary pipes own an OTG */
2951 if (!pipe_ctx->top_pipe &&
2952 !pipe_ctx->prev_odm_pipe &&
2954 struct timing_generator *tg = pipe_ctx->stream_res.tg;
2956 if (context->stream_status[i].plane_count == 0)
2957 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
/* Disable is applied against the *current* state's pipe_ctx */
2961 for (i = 0; i < dc->res_pool->pipe_count; i++)
2962 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2963 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2965 for (i = 0; i < dc->res_pool->pipe_count; i++)
2966 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2967 dc->hwss.optimize_bandwidth(dc, context);
2971 if (dc->hwseq->wa.DEGVIDCN10_254)
2972 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
/*
 * HW frame-packed stereo workaround: if any stream in the context uses HW
 * frame packing, disallow HUBBUB self-refresh control.
 */
2975 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2979 for (i = 0; i < context->stream_count; i++) {
2980 if (context->streams[i]->timing.timing_3d_format
2981 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2985 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
/*
 * Prepare-bandwidth phase: update clocks for the new context (phyclk
 * dropped to 0 when there are no streams), program HUBBUB watermarks
 * (recording whether a later optimization pass is required), apply the
 * stereo frame-pack WA, and re-notify pplib of watermark ranges when WM
 * reporting is overridden. Bracketed by pstate sanity checks in debug
 * builds.
 */
2991 void dcn10_prepare_bandwidth(
2993 struct dc_state *context)
2995 struct dce_hwseq *hws = dc->hwseq;
2996 struct hubbub *hubbub = dc->res_pool->hubbub;
2998 if (dc->debug.sanity_checks)
2999 hws->funcs.verify_allow_pstate_change_high(dc);
3001 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
/* With no streams the phy clock can be dropped entirely */
3002 if (context->stream_count == 0)
3003 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3005 dc->clk_mgr->funcs->update_clocks(
3011 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3012 &context->bw_ctx.bw.dcn.watermarks,
3013 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3015 dcn10_stereo_hw_frame_pack_wa(dc, context);
3017 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3019 dcn_bw_notify_pplib_of_wm_ranges(dc);
3023 if (dc->debug.sanity_checks)
3024 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Optimize-bandwidth phase (mirror of dcn10_prepare_bandwidth): update
 * clocks for the final context, re-program HUBBUB watermarks, apply the
 * stereo frame-pack WA, and re-notify pplib when WM reporting is
 * overridden. Bracketed by pstate sanity checks in debug builds.
 */
3027 void dcn10_optimize_bandwidth(
3029 struct dc_state *context)
3031 struct dce_hwseq *hws = dc->hwseq;
3032 struct hubbub *hubbub = dc->res_pool->hubbub;
3034 if (dc->debug.sanity_checks)
3035 hws->funcs.verify_allow_pstate_change_high(dc);
3037 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
/* With no streams the phy clock can be dropped entirely */
3038 if (context->stream_count == 0)
3039 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3041 dc->clk_mgr->funcs->update_clocks(
3047 hubbub->funcs->program_watermarks(hubbub,
3048 &context->bw_ctx.bw.dcn.watermarks,
3049 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3052 dcn10_stereo_hw_frame_pack_wa(dc, context);
3054 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3056 dcn_bw_notify_pplib_of_wm_ranges(dc);
3060 if (dc->debug.sanity_checks)
3061 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Program DRR (dynamic refresh rate) v_total min/mid/max on each pipe's
 * TG and, when DRR is active (both v_total_max and v_total_min non-zero),
 * arm static-screen control with the manual trigger event.
 *
 * Fix: the second argument to set_drr() had been mojibake-corrupted to
 * "¶ms" (HTML-entity mangling of "&para" + "ms"); restored to &params.
 */
3064 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3065 int num_pipes, struct dc_crtc_timing_adjust adjust)
3068 struct drr_params params = {0};
3069 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3070 unsigned int event_triggers = 0x800;
3071 // Note DRR trigger events are generated regardless of whether num frames met.
3072 unsigned int num_frames = 2;
3074 params.vertical_total_max = adjust.v_total_max;
3075 params.vertical_total_min = adjust.v_total_min;
3076 params.vertical_total_mid = adjust.v_total_mid;
3077 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3078 /* TODO: If multiple pipes are to be supported, you need
3079 * some GSL stuff. Static screen triggers may be programmed differently
3082 for (i = 0; i < num_pipes; i++) {
3083 if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3084 if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3085 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3086 pipe_ctx[i]->stream_res.tg, &params);
3087 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3088 if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3089 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3090 pipe_ctx[i]->stream_res.tg,
3091 event_triggers, num_frames);
/*
 * Read back the current CRTC position. Every pipe's TG writes into the
 * same @position, so with multiple pipes the last one wins — single-pipe
 * use is assumed (see the TODO below).
 */
3096 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3098 struct crtc_position *position)
3102 /* TODO: handle pipes > 1
3104 for (i = 0; i < num_pipes; i++)
3105 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
/*
 * Translate the generic static-screen trigger params into a TG trigger
 * bitmask and program it on every pipe. NOTE(review): the lines that OR
 * the specific trigger bits into @triggers are not visible in this chunk.
 */
3108 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3109 int num_pipes, const struct dc_static_screen_params *params)
3112 unsigned int triggers = 0;
3114 if (params->triggers.surface_update)
3116 if (params->triggers.cursor_update)
3118 if (params->triggers.force_trigger)
3121 for (i = 0; i < num_pipes; i++)
3122 pipe_ctx[i]->stream_res.tg->funcs->
3123 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3124 triggers, params->num_frames);
/*
 * Derive CRTC stereo flags from the stream's 3D timing format and view
 * format: stereo/polarity are programmed only for frame-sequential view
 * output on a stereo-capable timing; DP stereo sync is disabled for
 * converter dongles; HW frame packing is flagged separately.
 */
3127 static void dcn10_config_stereo_parameters(
3128 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3130 enum view_3d_format view_format = stream->view_format;
3131 enum dc_timing_3d_format timing_3d_format =\
3132 stream->timing.timing_3d_format;
3133 bool non_stereo_timing = false;
/* These timings carry both eyes in one frame — no CRTC stereo needed */
3135 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3136 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3137 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3138 non_stereo_timing = true;
3140 if (non_stereo_timing == false &&
3141 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3143 flags->PROGRAM_STEREO = 1;
3144 flags->PROGRAM_POLARITY = 1;
3145 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3146 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3147 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3148 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3149 enum display_dongle_type dongle = \
3150 stream->link->ddc->dongle_type;
/* Converter dongles cannot pass DP stereo sync through */
3151 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3152 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3153 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3154 flags->DISABLE_STEREO_DP_SYNC = 1;
3156 flags->RIGHT_EYE_POLARITY =\
3157 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3158 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3159 flags->FRAME_PACKED = 1;
/*
 * dcn10_setup_stereo - Program OPP and TG stereo state for a pipe.
 *
 * Derives the stereo flags from the stream, drives the sideband stereo
 * GPIO when the timing uses sideband frame-alternate signalling, then
 * hands the flags to the OPP and the timing generator.
 */
void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
	struct crtc_stereo_flags flags = { 0 };
	struct dc_stream_state *stream = pipe_ctx->stream;

	dcn10_config_stereo_parameters(stream, &flags);

	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
		/* try to assert the stereo GPIO; on failure fall back to
		 * de-asserting it so the pin is left in a known state */
		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);

	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
		pipe_ctx->stream_res.opp,
		flags.PROGRAM_STEREO == 1,

	pipe_ctx->stream_res.tg->funcs->program_stereo(
		pipe_ctx->stream_res.tg,
/*
 * get_hubp_by_inst - Find the HUBP whose instance id matches @mpcc_inst.
 *
 * Linear scan over the resource pool's hubps; NOTE(review): callers must
 * handle a not-found result (the fall-through return is outside this view).
 */
static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
	for (i = 0; i < res_pool->pipe_count; i++) {
		if (res_pool->hubps[i]->inst == mpcc_inst)
			return res_pool->hubps[i];
/*
 * dcn10_wait_for_mpcc_disconnect - Complete any pending MPCC disconnects
 * for the OPP attached to @pipe_ctx.
 *
 * For each MPCC instance flagged as disconnect-pending on the pipe's OPP:
 * wait for the MPC to go idle (only if the TG is still running, otherwise
 * the wait would never finish), clear the pending flag, and blank the
 * corresponding HUBP. Optional p-state sanity checks bracket the work.
 */
void dcn10_wait_for_mpcc_disconnect(
	struct resource_pool *res_pool,
	struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!pipe_ctx->stream_res.opp)

	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
			/* NOTE(review): get_hubp_by_inst() may fail to find a
			 * match for mpcc_inst — confirm hubp cannot be NULL here */
			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);

			if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
			hubp->funcs->set_blank(hubp, true);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * dcn10_dummy_display_power_gating - No-op power-gating hook.
 *
 * DCN does not use the DCE-style per-controller display power gating,
 * so this stub satisfies the hwseq interface without touching hardware.
 */
bool dcn10_dummy_display_power_gating(
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
/*
 * dcn10_update_pending_status - Refresh a plane's flip/address status.
 *
 * Reads the HUBP flip-pending bit, accumulates it into the plane status,
 * latches requested_address into current_address, and derives the stereo
 * eye from the TG. Also retires the "disallow self-refresh during
 * multi-plane transition" workaround once a new frame has started.
 */
void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dc *dc = pipe_ctx->stream->ctx->dc;

	if (plane_state == NULL)

	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
					pipe_ctx->plane_res.hubp);

	/* OR, not assign: don't clear a pending flag another pipe already set */
	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;

	plane_state->status.current_address = plane_state->status.requested_address;

	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
			tg->funcs->is_stereo_left_eye) {
		plane_state->status.is_right_eye =
				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);

	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
		struct dce_hwseq *hwseq = dc->hwseq;
		struct timing_generator *tg = dc->res_pool->timing_generators[0];
		unsigned int cur_frame = tg->funcs->get_frame_count(tg);

		/* workaround only spans the frame it was applied on; re-enable
		 * self-refresh (subject to the stutter debug knob) afterwards */
		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
/*
 * dcn10_update_dchub - Forward DCHUB init data to the HUBBUB.
 */
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;

	/* In DCN, this programming sequence is owned by the hubbub */
	hubbub->funcs->update_dchub(hubbub, dh_data);
3291 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3293 struct pipe_ctx *test_pipe, *split_pipe;
3294 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3295 struct rect r1 = scl_data->recout, r2, r2_half;
3296 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3297 int cur_layer = pipe_ctx->plane_state->layer_index;
3300 * Disable the cursor if there's another pipe above this with a
3301 * plane that contains this pipe's viewport to prevent double cursor
3302 * and incorrect scaling artifacts.
3304 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3305 test_pipe = test_pipe->top_pipe) {
3306 // Skip invisible layer and pipe-split plane on same layer
3307 if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
3310 r2 = test_pipe->plane_res.scl_data.recout;
3311 r2_r = r2.x + r2.width;
3312 r2_b = r2.y + r2.height;
3313 split_pipe = test_pipe;
3316 * There is another half plane on same layer because of
3317 * pipe-split, merge together per same height.
3319 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3320 split_pipe = split_pipe->top_pipe)
3321 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3322 r2_half = split_pipe->plane_res.scl_data.recout;
3323 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3324 r2.width = r2.width + r2_half.width;
3325 r2_r = r2.x + r2.width;
3329 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
/*
 * dcn10_dmub_should_update_cursor_data - Gate DMUB cursor-info updates.
 *
 * The DMUB firmware only needs cursor rectangle updates for PSR-SU, or
 * for PSR1 when software-controlled PSR is enabled; video-progressive
 * surfaces are excluded (the branch return values are outside this view).
 */
static bool dcn10_dmub_should_update_cursor_data(
		struct pipe_ctx *pipe_ctx,
		struct dc_debug_options *debug)
	if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)

	if (pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)

	if (pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
	    debug->enable_sw_cntl_psr)
/*
 * dcn10_dmub_update_cursor_data - Send the current cursor rectangle and
 * enable state to the DMUB firmware (used for PSR cursor handling).
 *
 * Recomputes the cursor's viewport-relative position, applying the same
 * rotation/mirror/visibility adjustments the HUBP programming uses, then
 * queues a DMUB_CMD__UPDATE_CURSOR_INFO command and waits for it to
 * complete. Either @cur_pos or @cur_attr may be NULL, in which case the
 * value cached on @hubp from the previous call is used (see comment below).
 */
static void dcn10_dmub_update_cursor_data(
		struct pipe_ctx *pipe_ctx,
		const struct dc_cursor_mi_param *param,
		const struct dc_cursor_position *cur_pos,
		const struct dc_cursor_attributes *cur_attr)
	union dmub_rb_cmd cmd;
	struct dmub_cmd_update_cursor_info_data *update_cursor_info;
	const struct dc_cursor_position *pos;
	const struct dc_cursor_attributes *attr;
	int src_x_offset = 0;
	int src_y_offset = 0;
	int cursor_height = 0;
	int cursor_width = 0;
	uint32_t cur_en = 0;
	unsigned int panel_inst = 0;

	struct dc_debug_options *debug = &hubp->ctx->dc->debug;

	if (!dcn10_dmub_should_update_cursor_data(pipe_ctx, debug))
	 * if cur_pos == NULL means the caller is from cursor_set_attribute
	 * then driver use previous cursor position data
	 * if cur_attr == NULL means the caller is from cursor_set_position
	 * then driver use previous cursor attribute
	 * if cur_pos or cur_attr is not NULL then update it
	if (cur_pos != NULL)
		pos = &hubp->curs_pos;

	if (cur_attr != NULL)
		attr = &hubp->curs_attr;

	/* only panels with an eDP link have a DMUB panel instance */
	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, pipe_ctx->stream->link, &panel_inst))

	src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
	src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
	x_hotspot = pos->x_hotspot;
	cursor_height = (int)attr->height;
	cursor_width = (int)attr->width;
	cur_en = pos->enable ? 1:0;

	// Rotated cursor width/height and hotspots tweaks for offset calculation
	if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
		swap(cursor_height, cursor_width);
		if (param->rotation == ROTATION_ANGLE_90) {
			src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
			src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
	} else if (param->rotation == ROTATION_ANGLE_180) {
		src_x_offset = pos->x - param->viewport.x;
		src_y_offset = pos->y - param->viewport.y;

	if (param->mirror) {
		x_hotspot = param->viewport.width - x_hotspot;
		src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;

	/* hide the cursor when no part of it intersects the viewport */
	if (src_x_offset >= (int)param->viewport.width)
		cur_en = 0;  /* not visible beyond right edge*/

	if (src_x_offset + cursor_width <= 0)
		cur_en = 0;  /* not visible beyond left edge*/

	if (src_y_offset >= (int)param->viewport.height)
		cur_en = 0;  /* not visible beyond bottom edge*/

	if (src_y_offset + cursor_height <= 0)
		cur_en = 0;  /* not visible beyond top edge*/

	// Cursor bitmaps have different hotspot values
	// There's a possibility that the above logic returns a negative value, so we clamp them to 0
	if (src_x_offset < 0)
	if (src_y_offset < 0)

	/* build and submit the DMUB cursor-info command, then block until
	 * the firmware has consumed it */
	memset(&cmd, 0x0, sizeof(cmd));
	cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
	cmd.update_cursor_info.header.payload_bytes =
			sizeof(cmd.update_cursor_info.update_cursor_info_data);
	update_cursor_info = &cmd.update_cursor_info.update_cursor_info_data;
	update_cursor_info->cursor_rect.x = src_x_offset + param->viewport.x;
	update_cursor_info->cursor_rect.y = src_y_offset + param->viewport.y;
	update_cursor_info->cursor_rect.width = attr->width;
	update_cursor_info->cursor_rect.height = attr->height;
	update_cursor_info->enable = cur_en;
	update_cursor_info->pipe_idx = pipe_ctx->pipe_idx;
	update_cursor_info->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	update_cursor_info->panel_inst = panel_inst;
	dc_dmub_srv_cmd_queue(pipe_ctx->stream->ctx->dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(pipe_ctx->stream->ctx->dmub_srv);
	dc_dmub_srv_wait_idle(pipe_ctx->stream->ctx->dmub_srv);
/*
 * dcn10_set_cursor_position - Program the HW cursor position for a pipe.
 *
 * Converts the stream-space cursor position into plane space (scaling,
 * optional source translation, hotspot shifting for negative positions),
 * then mirrors/swaps coordinates for 90/180/270 degree rotation — with
 * special handling for pipe-split and ODM-combine topologies — and
 * finally programs the HUBP and DPP, and notifies DMUB.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
		(pipe_ctx->bottom_pipe != NULL);
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.

	 * Translate cursor from stream space to plane space.
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		/* 90/270: src width/height are swapped relative to dst */
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;

	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;

	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
		pos_cpy.x_hotspot -= x_pos;
		pos_cpy.y_hotspot -= y_pos;

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* video-progressive planes never show the HW cursor */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* a containing pipe above us will draw the cursor instead */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	// Swap axis and mirror horizontally
	if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
			pipe_ctx->plane_res.scl_data.viewport.y;

		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 * pos_cpy.y as the 180 degree rotation case below,
		 * but use pos_cpy.x as our input because we are rotating
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
			/* normalize with the smaller of the two viewport.y values */
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			pos_cpy.y += pos_cpy_x_offset;
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;

	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (pipe_split_on || odm_combine_on) {
			if (pos_cpy.x >= viewport_width + viewport_x) {
				pos_cpy.x = 2 * viewport_width
						- pos_cpy.x + 2 * viewport_x;
				uint32_t temp_x = pos_cpy.x;

				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
				if (temp_x >= viewport_x +
					(int)hubp->curs_attr.width || pos_cpy.x
					<= (int)hubp->curs_attr.width +
					pipe_ctx->plane_state->src_rect.x) {
					pos_cpy.x = temp_x + viewport_width;
			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;

		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 * pos_cpy.y_new = viewport.y + delta_from_bottom
		 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;

	dcn10_dmub_update_cursor_data(pipe_ctx, hubp, &param, &pos_cpy, NULL);
	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
/*
 * dcn10_set_cursor_attribute - Program cursor attributes (size, format,
 * address) into the pipe's HUBP and DPP, and refresh DMUB's cached copy.
 */
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
	struct dc_cursor_mi_param param = { 0 };

	 * If enter PSR without cursor attribute update
	 * the cursor attribute of dmub_restore_plane
	 * are initial value. call dmub to exit PSR and
	 * restore plane then update cursor attribute to
	 * avoid override with initial value
	if (pipe_ctx->plane_state != NULL) {
		param.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
		param.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz;
		param.viewport = pipe_ctx->plane_res.scl_data.viewport;
		param.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz;
		param.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert;
		param.rotation = pipe_ctx->plane_state->rotation;
		param.mirror = pipe_ctx->plane_state->horizontal_mirror;
		/* attributes-only update: cur_pos is NULL so DMUB reuses the
		 * previously cached position */
		dcn10_dmub_update_cursor_data(pipe_ctx, pipe_ctx->plane_res.hubp, &param, NULL, attributes);

	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
			pipe_ctx->plane_res.hubp, attributes);
	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
			pipe_ctx->plane_res.dpp, attributes);
/*
 * dcn10_set_cursor_sdr_white_level - Scale the cursor for HDR output.
 *
 * When the stream's SDR white level exceeds the 80-nit reference, compute
 * the ratio sdr_white_level/80 as a half-precision custom float (e5m10)
 * and program it as the DPP cursor multiplier; otherwise leave the
 * default 1.0 (0x3c00) scale.
 */
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
	struct fixed31_32 multiplier;
	struct dpp_cursor_attributes opt_attr = { 0 };
	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
	struct custom_float_format fmt;

	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)

	/* half-float layout: 5 exponent bits, 10 mantissa bits */
	fmt.exponenta_bits = 5;
	fmt.mantissa_bits = 10;

	if (sdr_white_level > 80) {
		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);

	opt_attr.scale = hw_scale;

	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
			pipe_ctx->plane_res.dpp, &opt_attr);
 * apply_front_porch_workaround TODO FPGA still need?
 *
 * This is a workaround for a bug that has existed since R5xx and has not been
 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
static void apply_front_porch_workaround(
	struct dc_crtc_timing *timing)
	/* clamp v_front_porch up to the hardware-safe minimum */
	if (timing->flags.INTERLACE == 1) {
		if (timing->v_front_porch < 2)
			timing->v_front_porch = 2;
		if (timing->v_front_porch < 1)
			timing->v_front_porch = 1;
/*
 * dcn10_get_vupdate_offset_from_vsync - Compute the line offset of the
 * VUPDATE event relative to VSYNC for a pipe.
 *
 * Works on a patched copy of the stream timing (front-porch workaround
 * applied), derives the blank-end line, and subtracts vstartup. The
 * result may be negative when VUPDATE lands before VSYNC.
 */
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	struct dc_crtc_timing patched_crtc_timing;
	int vesa_sync_start;
	int interlace_factor;
	int vertical_line_start;

	patched_crtc_timing = *dc_crtc_timing;
	apply_front_porch_workaround(&patched_crtc_timing);

	/* interlaced timings count each field, hence the factor of 2 */
	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;

	vesa_sync_start = patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom +
			patched_crtc_timing.v_front_porch;

	asic_blank_end = (patched_crtc_timing.v_total -
			patched_crtc_timing.v_border_top)

	vertical_line_start = asic_blank_end -
			pipe_ctx->pipe_dlg_param.vstartup_start + 1;

	return vertical_line_start;
/*
 * dcn10_calc_vupdate_position - Compute the start/end scanlines for a
 * VUPDATE-referenced periodic vertical interrupt.
 *
 * Combines the interrupt's configured line offset (biased by one toward
 * zero so the interrupt fires on the intended line) with the VUPDATE
 * offset from VSYNC, wrapping negative results around v_total. The
 * window is 2 lines wide.
 */
void dcn10_calc_vupdate_position(
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	int vline_int_offset_from_vupdate =
			pipe_ctx->stream->periodic_interrupt0.lines_offset;
	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	/* shift the requested offset one line toward zero */
	if (vline_int_offset_from_vupdate > 0)
		vline_int_offset_from_vupdate--;
	else if (vline_int_offset_from_vupdate < 0)
		vline_int_offset_from_vupdate++;

	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;

	/* wrap a negative start around the end of the frame */
	if (start_position >= 0)
		*start_line = start_position;
		*start_line = dc_crtc_timing->v_total + start_position - 1;

	*end_line = *start_line + 2;

	/* wrap the end of the window past v_total as well */
	if (*end_line >= dc_crtc_timing->v_total)
/*
 * dcn10_cal_vline_position - Resolve the scanline window for a periodic
 * vertical interrupt, based on that interrupt's reference point.
 *
 * Only the START_V_UPDATE reference needs a real calculation (delegated
 * to dcn10_calc_vupdate_position()); a VSYNC reference is line 0.
 */
static void dcn10_cal_vline_position(
		struct pipe_ctx *pipe_ctx,
		enum vline_select vline,
		uint32_t *start_line,
	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;

	if (vline == VLINE0)
		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
	else if (vline == VLINE1)
		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;

	switch (ref_point) {
	case START_V_UPDATE:
		dcn10_calc_vupdate_position(
		// Suppose to do nothing because vsync is 0;
/*
 * dcn10_setup_periodic_interrupt - Program a pipe's periodic vertical
 * interrupt.
 *
 * VLINE0 interrupts need their start/end window computed relative to the
 * configured reference point; VLINE1 takes a raw line offset directly.
 */
void dcn10_setup_periodic_interrupt(
		struct pipe_ctx *pipe_ctx,
		enum vline_select vline)
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	if (vline == VLINE0) {
		uint32_t start_line = 0;
		uint32_t end_line = 0;

		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);

		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);

	} else if (vline == VLINE1) {
		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
			pipe_ctx->stream->periodic_interrupt1.lines_offset);
/*
 * dcn10_setup_vupdate_interrupt - Arm the VUPDATE (vertical interrupt 2)
 * at the computed VUPDATE scanline; skipped when the offset is negative
 * (VUPDATE before VSYNC) or when the TG lacks the hook.
 */
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	if (start_line < 0) {

	if (tg->funcs->setup_vertical_interrupt2)
		tg->funcs->setup_vertical_interrupt2(tg, start_line);
/*
 * dcn10_unblank_stream - Unblank the stream encoder output for a pipe.
 *
 * For DP signals, builds the unblank parameters from the stream timing
 * (halving the pixel clock for YCbCr 4:2:0) and calls the encoder's
 * dp_unblank. eDP panels additionally get their backlight turned on.
 */
void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
	struct encoder_unblank_param params = {0};
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;

	/* only 3 items below are used by unblank */
	params.timing = pipe_ctx->stream->timing;
	params.link_settings.link_rate = link_settings->link_rate;

	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		/* YCbCr 4:2:0 transports two pixels per clock */
		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
			params.timing.pix_clk_100hz /= 2;
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
		hws->funcs.edp_backlight_control(link, true);
/*
 * dcn10_send_immediate_sdp_message - Forward a custom SDP message to the
 * stream encoder; DP-signal streams only.
 */
void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
		const uint8_t *custom_sdp_message,
		unsigned int sdp_message_size)
	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
				pipe_ctx->stream_res.stream_enc,
/*
 * dcn10_set_clock - Request a specific DISPCLK or DPPCLK frequency.
 *
 * Validates the requested clk_khz against the clock manager's reported
 * min/max and bandwidth-required floors, records the request in the
 * current context's clock state, and asks the clock manager to apply it.
 *
 * Returns a DC_FAIL_* status describing which bound was violated, or
 * DC_ERROR_UNEXPECTED for an unsupported clock_type.
 */
enum dc_status dcn10_set_clock(struct dc *dc,
			enum dc_clock_type clock_type,
	struct dc_state *context = dc->current_state;
	struct dc_clock_config clock_cfg = {0};
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;

	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
		context, clock_type, &clock_cfg);

	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	/* must not drop below what the current bandwidth config requires */
	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/*update internal request clock for update clock use*/
	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
		current_clocks->dispclk_khz = clk_khz;
	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
		current_clocks->dppclk_khz = clk_khz;
		return DC_ERROR_UNEXPECTED;

	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
/*
 * dcn10_get_clock - Query the clock manager for the current configuration
 * of @clock_type into @clock_cfg; silently a no-op if the clock manager
 * or its get_clock hook is absent.
 */
void dcn10_get_clock(struct dc *dc,
			enum dc_clock_type clock_type,
			struct dc_clock_config *clock_cfg)
	struct dc_state *context = dc->current_state;

	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3945 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3947 struct resource_pool *pool = dc->res_pool;
3950 for (i = 0; i < pool->pipe_count; i++) {
3951 struct hubp *hubp = pool->hubps[i];
3952 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3954 hubp->funcs->hubp_read_state(hubp);
3957 dcc_en_bits[i] = s->dcc_en ? 1 : 0;