2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
57 #include "dce/dmub_outbox.h"
/* Logger init is a no-op here; the DTN_INFO() helpers resolve dc_ctx directly. */
60 #define DC_LOGGER_INIT(logger)
/* Expand a register field name into its (shift, mask) pair from the hwseq tables. */
68 #define FN(reg_name, field_name) \
69 hws->shifts->field_name, hws->masks->field_name
71 /*print is 17 wide, first two characters are spaces*/
/* Log a DCHUB refclk cycle count as microseconds (see print_microsec()). */
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 print_microsec(dc_ctx, log_ctx, ref_cycle)
/* Number of hardware gamma LUT points programmed on DCN10. */
75 #define GAMMA_HW_POINTS_NUM 256
/* PGFSM (power-gate state machine) status codes polled by the *_pg_control() helpers. */
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
/* Convert a DCHUB reference-clock cycle count to microseconds and print it
 * into the DTN log with three fractional digits.
 */
80 static void print_microsec(struct dc_context *dc_ctx,
81 struct dc_log_buffer_ctx *log_ctx,
/* reference clock in MHz, derived from the pool's dchub_ref_clock_inKhz */
84 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
85 static const unsigned int frac = 1000;
/* microseconds scaled by frac: cycles * 1000 / MHz */
86 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
88 DTN_INFO(" %11d.%03d",
/* Acquire or release the pipe-control lock on every enabled top pipe of
 * @context, so a group of pipe updates can be applied atomically.
 */
93 void dcn10_lock_all_pipes(struct dc *dc,
94 struct dc_state *context,
97 struct pipe_ctx *pipe_ctx;
98 struct pipe_ctx *old_pipe_ctx;
99 struct timing_generator *tg;
102 for (i = 0; i < dc->res_pool->pipe_count; i++) {
103 old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
104 pipe_ctx = &context->res_ctx.pipe_ctx[i];
105 tg = pipe_ctx->stream_res.tg;
108 * Only lock the top pipe's tg to prevent redundant
109 * (un)locking. Also skip if pipe is disabled.
/* Skip non-top pipes, pipes with no plane in either the old or the new
 * state, and pipes whose timing generator is not running.
 */
111 if (pipe_ctx->top_pipe ||
113 (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
114 !tg->funcs->is_tg_enabled(tg))
/* third argument: true = acquire the lock, false = release it */
118 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
120 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
/* Dump the MPC and DPP CRC result registers into the DTN log.  Each group
 * is printed only when the register is mapped for this ASIC (REG() non-zero).
 */
124 static void log_mpc_crc(struct dc *dc,
125 struct dc_log_buffer_ctx *log_ctx)
127 struct dc_context *dc_ctx = dc->ctx;
128 struct dce_hwseq *hws = dc->hwseq;
130 if (REG(MPC_CRC_RESULT_GB))
131 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
132 REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
133 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
134 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
135 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
/* Read the HUBBUB watermark sets back from hardware and print each set's
 * urgency / self-refresh / DRAM-clock-change watermarks in microseconds.
 */
138 static void dcn10_log_hubbub_state(struct dc *dc,
139 struct dc_log_buffer_ctx *log_ctx)
141 struct dc_context *dc_ctx = dc->ctx;
142 struct dcn_hubbub_wm wm;
145 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
146 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
148 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
149 " sr_enter sr_exit dram_clk_change\n");
/* four watermark sets */
151 for (i = 0; i < 4; i++) {
152 struct dcn_hubbub_wm_set *s;
155 DTN_INFO("WM_Set[%d]:", s->wm_set);
/* values are refclk cycle counts; DTN_INFO_MICRO_SEC converts to us */
156 DTN_INFO_MICRO_SEC(s->data_urgent);
157 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
158 DTN_INFO_MICRO_SEC(s->sr_enter);
159 DTN_INFO_MICRO_SEC(s->sr_exit);
160 DTN_INFO_MICRO_SEC(s->dram_clk_change);
/* Dump per-HUBP hardware state to the DTN log in four tables:
 * general config, RQ (request), DLG (delay/latency generator) and
 * TTU (time-to-underflow) register groups, one row per pipe.
 */
167 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
169 struct dc_context *dc_ctx = dc->ctx;
170 struct resource_pool *pool = dc->res_pool;
/* ---- general HUBP config table ---- */
174 "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
175 for (i = 0; i < pool->pipe_count; i++) {
176 struct hubp *hubp = pool->hubps[i];
177 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
/* latch current register values into the cached state struct */
179 hubp->funcs->hubp_read_state(hubp);
182 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
195 s->underflow_status);
196 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
197 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
198 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
/* ---- RQ (request) register table, luma (L:) and chroma (C:) halves ---- */
203 DTN_INFO("\n=========RQ========\n");
204 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
205 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
206 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
207 for (i = 0; i < pool->pipe_count; i++) {
208 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
209 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
212 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
213 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
214 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
215 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
216 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
217 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
218 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
219 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
220 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
221 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
/* ---- DLG (delay/latency generator) register table ---- */
224 DTN_INFO("========DLG========\n");
225 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
226 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
227 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
228 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
229 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
230 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
231 " x_rp_dlay x_rr_sfl\n");
232 for (i = 0; i < pool->pipe_count; i++) {
233 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
234 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
237 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
238 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
239 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
240 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
241 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
242 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
243 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
244 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
245 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
246 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
247 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
248 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
249 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
250 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
251 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
252 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
253 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
254 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
255 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
256 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
257 dlg_regs->xfc_reg_remote_surface_flip_latency);
/* ---- TTU (time-to-underflow / QoS) register table ---- */
260 DTN_INFO("========TTU========\n");
261 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
262 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
263 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
264 for (i = 0; i < pool->pipe_count; i++) {
265 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
266 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
269 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
270 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
271 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
272 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
273 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
274 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
275 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
276 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
/* Top-level DTN log dump: walk every hardware block (HUBBUB, HUBP, DPP,
 * MPCC, OTG, DSC, stream/link encoders, clocks, CRC, and optional HPO DP
 * encoders) and print its current state for debugging.
 */
281 void dcn10_log_hw_state(struct dc *dc,
282 struct dc_log_buffer_ctx *log_ctx)
284 struct dc_context *dc_ctx = dc->ctx;
285 struct resource_pool *pool = dc->res_pool;
/* sub-loggers for HUBBUB watermarks and per-HUBP register tables */
290 dcn10_log_hubbub_state(dc, log_ctx);
292 dcn10_log_hubp_states(dc, log_ctx);
/* ---- DPP: gamma LUT modes and gamut remap coefficients ---- */
294 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
295 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
296 "C31 C32 C33 C34\n");
297 for (i = 0; i < pool->pipe_count; i++) {
298 struct dpp *dpp = pool->dpps[i];
299 struct dcn_dpp_state s = {0};
301 dpp->funcs->dpp_read_state(dpp, &s);
/* decode the LUT mode enums into human-readable names */
306 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
307 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
310 (s.igam_lut_mode == 0) ? "BypassFixed" :
311 ((s.igam_lut_mode == 1) ? "BypassFloat" :
312 ((s.igam_lut_mode == 2) ? "RAM" :
313 ((s.igam_lut_mode == 3) ? "RAM" :
315 (s.dgam_lut_mode == 0) ? "Bypass" :
316 ((s.dgam_lut_mode == 1) ? "sRGB" :
317 ((s.dgam_lut_mode == 2) ? "Ycc" :
318 ((s.dgam_lut_mode == 3) ? "RAM" :
319 ((s.dgam_lut_mode == 4) ? "RAM" :
321 (s.rgam_lut_mode == 0) ? "Bypass" :
322 ((s.rgam_lut_mode == 1) ? "sRGB" :
323 ((s.rgam_lut_mode == 2) ? "Ycc" :
324 ((s.rgam_lut_mode == 3) ? "RAM" :
325 ((s.rgam_lut_mode == 4) ? "RAM" :
328 s.gamut_remap_c11_c12,
329 s.gamut_remap_c13_c14,
330 s.gamut_remap_c21_c22,
331 s.gamut_remap_c23_c24,
332 s.gamut_remap_c31_c32,
333 s.gamut_remap_c33_c34);
/* ---- MPCC: blend tree state per pipe ---- */
338 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
339 for (i = 0; i < pool->pipe_count; i++) {
340 struct mpcc_state s = {0};
342 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
344 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
345 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
346 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
/* ---- OTG: timing generator state per enabled master ---- */
351 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
353 for (i = 0; i < pool->timing_generator_count; i++) {
354 struct timing_generator *tg = pool->timing_generators[i];
355 struct dcn_otg_state s = {0};
356 /* Read shared OTG state registers for all DCNx */
357 optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
360 * For DCN2 and greater, a register on the OPP is used to
361 * determine if the CRTC is blanked instead of the OTG. So use
362 * dpg_is_blanked() if exists, otherwise fallback on otg.
364 * TODO: Implement DCN-specific read_otg_state hooks.
366 if (pool->opps[i]->funcs->dpg_is_blanked)
367 s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i])
369 s.blank_enabled = tg->funcs->is_blanked(tg);
371 //only print if OTG master is enabled
372 if ((s.otg_enabled & 1) == 0)
375 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
393 s.underflow_occurred_status,
396 // Clear underflow for debug purposes
397 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
398 // This function is called only from Windows or Diags test environment, hence it's safe to clear
399 // it from here without affecting the original intent.
400 tg->funcs->clear_optc_underflow(tg);
404 // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
405 // TODO: Update golden log header to reflect this name change
/* ---- DSC: display stream compressor state ---- */
406 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
407 for (i = 0; i < pool->res_cap->num_dsc; i++) {
408 struct display_stream_compressor *dsc = pool->dscs[i];
409 struct dcn_dsc_state s = {0};
411 dsc->funcs->dsc_read_state(dsc, &s);
412 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
416 s.dsc_bits_per_pixel);
/* ---- stream encoders (only when enc_read_state is implemented) ---- */
421 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
422 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
423 for (i = 0; i < pool->stream_enc_count; i++) {
424 struct stream_encoder *enc = pool->stream_enc[i];
425 struct enc_state s = {0};
427 if (enc->funcs->enc_read_state) {
428 enc->funcs->enc_read_state(enc, &s);
429 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
432 s.sec_gsp_pps_line_num,
433 s.vbid6_line_reference,
435 s.sec_gsp_pps_enable,
436 s.sec_stream_enable);
/* ---- link encoders: FEC and link-training status ---- */
442 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
443 for (i = 0; i < dc->link_count; i++) {
444 struct link_encoder *lenc = dc->links[i]->link_enc;
446 struct link_enc_state s = {0};
/* lenc may be NULL for links without a dedicated encoder */
448 if (lenc && lenc->funcs->read_state) {
449 lenc->funcs->read_state(lenc, &s);
450 DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
453 s.dphy_fec_ready_shadow,
454 s.dphy_fec_active_status,
455 s.dp_link_training_complete);
/* ---- calculated clock values from the current BW context ---- */
461 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
462 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
463 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
464 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
465 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
466 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
467 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
468 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
469 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
471 log_mpc_crc(dc, log_ctx);
/* ---- DP HPO (128b/132b) stream encoders, present on DCN3.1+ pools ---- */
474 if (pool->hpo_dp_stream_enc_count > 0) {
475 DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
476 for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
477 struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
478 struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
480 if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
481 hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
483 DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
484 hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
485 hpo_dp_se_state.stream_enc_enabled,
486 hpo_dp_se_state.otg_inst,
487 (hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
488 ((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
489 (hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
490 (hpo_dp_se_state.component_depth == 0) ? 6 :
491 ((hpo_dp_se_state.component_depth == 1) ? 8 :
492 (hpo_dp_se_state.component_depth == 2) ? 10 : 12),
493 hpo_dp_se_state.vid_stream_enabled,
494 hpo_dp_se_state.sdp_enabled,
495 hpo_dp_se_state.compressed_format,
496 hpo_dp_se_state.mapped_to_link_enc);
503 /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
504 if (pool->hpo_dp_link_enc_count) {
505 DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
507 for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
508 struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
509 struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
511 if (hpo_dp_link_enc->funcs->read_state) {
512 hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
513 DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
514 hpo_dp_link_enc->inst,
515 hpo_dp_le_state.link_enc_enabled,
516 (hpo_dp_le_state.link_mode == 0) ? "TPS1" :
517 (hpo_dp_le_state.link_mode == 1) ? "TPS2" :
518 (hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
519 hpo_dp_le_state.lane_count,
520 hpo_dp_le_state.stream_src[0],
521 hpo_dp_le_state.slot_count[0],
522 hpo_dp_le_state.vc_rate_x[0],
523 hpo_dp_le_state.vc_rate_y[0]);
/* Report whether an underflow has occurred on this pipe's OTG or HUBP.
 * Any sticky underflow status found is cleared as a side effect, so a
 * subsequent call starts from a clean slate.
 */
535 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
537 struct hubp *hubp = pipe_ctx->plane_res.hubp;
538 struct timing_generator *tg = pipe_ctx->stream_res.tg;
/* OTG-side underflow: check and clear */
540 if (tg->funcs->is_optc_underflow_occurred(tg)) {
541 tg->funcs->clear_optc_underflow(tg);
/* HUBP-side underflow: check and clear */
545 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
546 hubp->funcs->hubp_clear_underflow(hubp);
/* Force every plane-pipeline power domain (HUBP and DPP) on.  force_on is
 * hard-coded true here, i.e. this variant disables power gating entirely.
 */
552 void dcn10_enable_power_gating_plane(
553 struct dce_hwseq *hws,
556 bool force_on = true; /* disable power gating */
/* HUBP power domains (even-numbered: 0/2/4/6) */
562 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
563 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
564 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
565 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
/* DPP power domains (odd-numbered: 1/3/5/7) */
568 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
569 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
570 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
571 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
/* Take the display controller out of legacy VGA mode.  If none of the four
 * VGA engines is active this is a no-op; otherwise all D*VGA_CONTROL
 * registers are cleared and the VGA-test render sequence is kicked off so
 * DCHUBP timing updates correctly (see HW note below).
 */
574 void dcn10_disable_vga(
575 struct dce_hwseq *hws)
577 unsigned int in_vga1_mode = 0;
578 unsigned int in_vga2_mode = 0;
579 unsigned int in_vga3_mode = 0;
580 unsigned int in_vga4_mode = 0;
/* sample the current VGA mode enable bit of each display engine */
582 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
583 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
584 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
585 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
/* nothing to do if no engine is in VGA mode */
587 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
588 in_vga3_mode == 0 && in_vga4_mode == 0)
591 REG_WRITE(D1VGA_CONTROL, 0);
592 REG_WRITE(D2VGA_CONTROL, 0);
593 REG_WRITE(D3VGA_CONTROL, 0);
594 REG_WRITE(D4VGA_CONTROL, 0);
596 /* HW Engineer's Notes:
597 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
598 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
600 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
601 * VGA_TEST_ENABLE, to leave it in the same state as before.
603 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
604 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
608 * dcn10_dpp_pg_control - DPP power gate control.
610 * @hws: dce_hwseq reference.
611 * @dpp_inst: DPP instance reference.
612 * @power_on: true to power the DPP up (domain ungated), false to power-gate it.
614 * Enable or disable power gate in the specific DPP instance.
616 void dcn10_dpp_pg_control(
617 struct dce_hwseq *hws,
618 unsigned int dpp_inst,
/* power_on true -> gate released (0) and PGFSM expected ON; false -> gated */
621 uint32_t power_gate = power_on ? 0 : 1;
622 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
/* bail out when DPP power gating is disabled via debug option */
624 if (hws->ctx->dc->debug.disable_dpp_power_gate)
/* bail out when the domain registers are not mapped on this ASIC */
626 if (REG(DOMAIN1_PG_CONFIG) == 0)
/* DPP instances map to the odd power domains: inst N -> DOMAIN(2N+1) */
631 REG_UPDATE(DOMAIN1_PG_CONFIG,
632 DOMAIN1_POWER_GATE, power_gate);
634 REG_WAIT(DOMAIN1_PG_STATUS,
635 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
639 REG_UPDATE(DOMAIN3_PG_CONFIG,
640 DOMAIN3_POWER_GATE, power_gate);
642 REG_WAIT(DOMAIN3_PG_STATUS,
643 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
647 REG_UPDATE(DOMAIN5_PG_CONFIG,
648 DOMAIN5_POWER_GATE, power_gate);
650 REG_WAIT(DOMAIN5_PG_STATUS,
651 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
655 REG_UPDATE(DOMAIN7_PG_CONFIG,
656 DOMAIN7_POWER_GATE, power_gate);
658 REG_WAIT(DOMAIN7_PG_STATUS,
659 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
669 * dcn10_hubp_pg_control - HUBP power gate control.
671 * @hws: dce_hwseq reference.
672 * @hubp_inst: HUBP instance reference.
673 * @power_on: true to power the HUBP up (domain ungated), false to power-gate it.
675 * Enable or disable power gate in the specific HUBP instance.
677 void dcn10_hubp_pg_control(
678 struct dce_hwseq *hws,
679 unsigned int hubp_inst,
/* power_on true -> gate released (0) and PGFSM expected ON; false -> gated */
682 uint32_t power_gate = power_on ? 0 : 1;
683 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
/* bail out when HUBP power gating is disabled via debug option */
685 if (hws->ctx->dc->debug.disable_hubp_power_gate)
/* bail out when the domain registers are not mapped on this ASIC */
687 if (REG(DOMAIN0_PG_CONFIG) == 0)
/* HUBP instances map to the even power domains: inst N -> DOMAIN(2N) */
691 case 0: /* DCHUBP0 */
692 REG_UPDATE(DOMAIN0_PG_CONFIG,
693 DOMAIN0_POWER_GATE, power_gate);
695 REG_WAIT(DOMAIN0_PG_STATUS,
696 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
699 case 1: /* DCHUBP1 */
700 REG_UPDATE(DOMAIN2_PG_CONFIG,
701 DOMAIN2_POWER_GATE, power_gate);
703 REG_WAIT(DOMAIN2_PG_STATUS,
704 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
707 case 2: /* DCHUBP2 */
708 REG_UPDATE(DOMAIN4_PG_CONFIG,
709 DOMAIN4_POWER_GATE, power_gate);
711 REG_WAIT(DOMAIN4_PG_STATUS,
712 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
715 case 3: /* DCHUBP3 */
716 REG_UPDATE(DOMAIN6_PG_CONFIG,
717 DOMAIN6_POWER_GATE, power_gate);
719 REG_WAIT(DOMAIN6_PG_STATUS,
720 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
/* Un-gate the front end (DPP + HUBP power domains) for @plane_id.  The
 * DC_IP_REQUEST_CNTL handshake brackets the pg_control calls: request IP
 * access, power the domains up, then release the request.
 */
729 static void power_on_plane(
730 struct dce_hwseq *hws,
733 DC_LOGGER_INIT(hws->ctx->logger);
734 if (REG(DC_IP_REQUEST_CNTL)) {
/* open the IP request window before touching the power domains */
735 REG_SET(DC_IP_REQUEST_CNTL, 0,
738 if (hws->funcs.dpp_pg_control)
739 hws->funcs.dpp_pg_control(hws, plane_id, true);
741 if (hws->funcs.hubp_pg_control)
742 hws->funcs.hubp_pg_control(hws, plane_id, true);
/* close the IP request window again */
744 REG_SET(DC_IP_REQUEST_CNTL, 0,
747 "Un-gated front end for pipe %d\n", plane_id);
/* Revert the DEGVIDCN10_253 stutter workaround: blank HUBP0 again and
 * power-gate it back down.  No-op if the workaround was never applied.
 */
751 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
753 struct dce_hwseq *hws = dc->hwseq;
754 struct hubp *hubp = dc->res_pool->hubps[0];
756 if (!hws->wa_state.DEGVIDCN10_253_applied)
759 hubp->funcs->set_blank(hubp, true);
/* IP request window around the power-gate change, as in power_on_plane() */
761 REG_SET(DC_IP_REQUEST_CNTL, 0,
764 hws->funcs.hubp_pg_control(hws, 0, false);
765 REG_SET(DC_IP_REQUEST_CNTL, 0,
768 hws->wa_state.DEGVIDCN10_253_applied = false;
/* DEGVIDCN10_253 workaround: when every pipe is power gated, stutter does
 * not engage; power HUBP0 back up (unblanked) so stutter can work.  Skipped
 * when stutter is disabled or the workaround flag is not set, and only
 * applied once all HUBPs are confirmed power gated.
 */
771 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
773 struct dce_hwseq *hws = dc->hwseq;
774 struct hubp *hubp = dc->res_pool->hubps[0];
777 if (dc->debug.disable_stutter)
780 if (!hws->wa.DEGVIDCN10_253)
/* bail out unless every HUBP is power gated */
783 for (i = 0; i < dc->res_pool->pipe_count; i++) {
784 if (!dc->res_pool->hubps[i]->power_gated)
788 /* all pipe power gated, apply work around to enable stutter. */
790 REG_SET(DC_IP_REQUEST_CNTL, 0,
793 hws->funcs.hubp_pg_control(hws, 0, true);
794 REG_SET(DC_IP_REQUEST_CNTL, 0,
797 hubp->funcs->set_hubp_blank_en(hubp, false);
798 hws->wa_state.DEGVIDCN10_253_applied = true;
/* Run the VBIOS golden init sequence for DCN: global init power gating,
 * then per-pipe disable, with an S0i3 workaround that restores
 * ALLOW_SELF_REFRESH_FORCE_ENABLE if the command table flipped it.
 */
801 void dcn10_bios_golden_init(struct dc *dc)
803 struct dce_hwseq *hws = dc->hwseq;
804 struct dc_bios *bp = dc->ctx->dc_bios;
806 bool allow_self_fresh_force_enable = true;
/* platform-specific s0i3 hook may fully handle golden init */
808 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
/* snapshot the self-refresh force-enable state before the command table runs */
811 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
812 allow_self_fresh_force_enable =
813 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
816 /* WA for making DF sleep when idle after resume from S0i3.
817 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
818 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
819 * before calling command table and it changed to 1 after,
820 * it should be set back to 0.
823 /* initialize dcn global */
824 bp->funcs->enable_disp_power_gating(bp,
825 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
827 for (i = 0; i < dc->res_pool->pipe_count; i++) {
828 /* initialize dcn per pipe */
829 bp->funcs->enable_disp_power_gating(bp,
830 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
/* undo the command table's force-enable if it was off before (see WA above) */
833 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
834 if (allow_self_fresh_force_enable == false &&
835 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
836 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
837 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
/* Workaround for a false OPTC underflow indication: after waiting for MPCC
 * disconnect on all pipes of @stream and re-enabling blank-data double
 * buffering, clear any underflow bit that was not present before.
 */
841 static void false_optc_underflow_wa(
843 const struct dc_stream_state *stream,
844 struct timing_generator *tg)
849 if (!dc->hwseq->wa.false_optc_underflow)
/* remember whether underflow was already latched before the WA runs */
852 underflow = tg->funcs->is_optc_underflow_occurred(tg);
854 for (i = 0; i < dc->res_pool->pipe_count; i++) {
855 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
857 if (old_pipe_ctx->stream != stream)
860 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
863 if (tg->funcs->set_blank_data_double_buffer)
864 tg->funcs->set_blank_data_double_buffer(tg, true);
/* only clear underflow newly raised by this sequence, keep pre-existing state */
866 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
867 tg->funcs->clear_optc_underflow(tg);
870 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
872 struct pipe_ctx *other_pipe;
873 int vready_offset = pipe->pipe_dlg_param.vready_offset;
875 /* Always use the largest vready_offset of all connected pipes */
876 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
877 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
878 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
880 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
881 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
882 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
884 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
885 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
886 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
888 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
889 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
890 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
893 return vready_offset;
/* Program and start the OTG/timing generator for a stream's top pipe:
 * enable the OPTC clock, program the pixel clock PLL, set HDMI symclk
 * state, program timing, set the blank color, blank the CRTC while the
 * false-underflow WA runs, and finally enable the CRTC.
 */
896 enum dc_status dcn10_enable_stream_timing(
897 struct pipe_ctx *pipe_ctx,
898 struct dc_state *context,
901 struct dc_stream_state *stream = pipe_ctx->stream;
902 enum dc_color_space color_space;
903 struct tg_color black_color = {0};
905 /* by upper caller loop, pipe0 is parent pipe and be called first.
906 * back end is set up by for pipe0. Other children pipe share back end
907 * with pipe 0. No program is needed.
/* children (non-top) pipes share pipe0's back end; nothing to do */
909 if (pipe_ctx->top_pipe != NULL)
912 /* TODO check if timing_changed, disable stream if timing changed */
914 /* HW program guide assume display already disable
915 * by unplug sequence. OTG assume stop.
917 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
/* program the pixel clock PLL; abort on failure */
919 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
920 pipe_ctx->clock_source,
921 &pipe_ctx->stream_res.pix_clk_params,
922 link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
923 &pipe_ctx->pll_settings)) {
925 return DC_ERROR_UNEXPECTED;
/* track symclk state for HDMI TMDS so PHY power management stays correct */
928 if (dc_is_hdmi_tmds_signal(stream->signal)) {
929 stream->link->phy_state.symclk_ref_cnts.otg = 1;
930 if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
931 stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
933 stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
/* vready offset is shared across the whole MPC/ODM pipe group */
936 pipe_ctx->stream_res.tg->funcs->program_timing(
937 pipe_ctx->stream_res.tg,
939 calculate_vready_offset_for_group(pipe_ctx),
940 pipe_ctx->pipe_dlg_param.vstartup_start,
941 pipe_ctx->pipe_dlg_param.vupdate_offset,
942 pipe_ctx->pipe_dlg_param.vupdate_width,
943 pipe_ctx->stream->signal,
946 #if 0 /* move to after enable_crtc */
947 /* TODO: OPP FMT, ABM. etc. should be done here. */
948 /* or FPGA now. instance 0 only. TODO: move to opp.c */
950 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
952 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
953 pipe_ctx->stream_res.opp,
954 &stream->bit_depth_params,
957 /* program otg blank color */
958 color_space = stream->output_color_space;
959 color_space_to_black_color(dc, color_space, &black_color);
962 * The way 420 is packed, 2 channels carry Y component, 1 channel
963 * alternate between Cb and Cr, so both channels need the pixel
966 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
967 black_color.color_r_cr = black_color.color_g_y;
969 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
970 pipe_ctx->stream_res.tg->funcs->set_blank_color(
971 pipe_ctx->stream_res.tg,
/* blank before enabling the CRTC and run the false-underflow workaround */
974 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
975 !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
976 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
977 hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
978 false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
981 /* VTG is within DCHUB command block. DCFCLK is always on */
982 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
984 return DC_ERROR_UNEXPECTED;
987 /* TODO program crtc source select for non-virtual signal*/
988 /* TODO program FMT */
989 /* TODO setup link_enc */
990 /* TODO set stream attributes */
991 /* TODO program audio */
992 /* TODO enable stream if timing changed */
993 /* TODO unblank stream if DP */
/* Tear down a pipe's back end: disable the stream (or just audio), release
 * the audio endpoint, and — for the top pipe only — disable the CRTC, OPTC
 * clock and DRR, then detach the stream from the pipe context.
 */
998 static void dcn10_reset_back_end_for_pipe(
1000 struct pipe_ctx *pipe_ctx,
1001 struct dc_state *context)
1004 struct dc_link *link;
1005 DC_LOGGER_INIT(dc->ctx->logger);
/* nothing to reset if no stream encoder was ever attached */
1006 if (pipe_ctx->stream_res.stream_enc == NULL) {
1007 pipe_ctx->stream = NULL;
1011 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1012 link = pipe_ctx->stream->link;
1013 /* DPMS may already disable or */
1014 /* dpms_off status is incorrect due to fastboot
1015 * feature. When system resume from S4 with second
1016 * screen only, the dpms_off would be true but
1017 * VBIOS lit up eDP, so check link status too.
1019 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
1020 core_link_disable_stream(pipe_ctx);
1021 else if (pipe_ctx->stream_res.audio)
1022 dc->hwss.disable_audio_stream(pipe_ctx);
1024 if (pipe_ctx->stream_res.audio) {
1025 /*disable az_endpoint*/
1026 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
1029 if (dc->caps.dynamic_audio == true) {
1030 /*we have to dynamic arbitrate the audio endpoints*/
1031 /*we free the resource, need reset is_audio_acquired*/
1032 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
1033 pipe_ctx->stream_res.audio, false);
1034 pipe_ctx->stream_res.audio = NULL;
1039 /* by upper caller loop, parent pipe: pipe0, will be reset last.
1040 * back end share by all pipes and will be disable only when disable
1043 if (pipe_ctx->top_pipe == NULL) {
1045 if (pipe_ctx->stream_res.abm)
1046 dc->hwss.set_abm_immediate_disable(pipe_ctx);
1048 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
1050 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
/* NULL params clears any dynamic refresh rate programming */
1051 if (pipe_ctx->stream_res.tg->funcs->set_drr)
1052 pipe_ctx->stream_res.tg->funcs->set_drr(
1053 pipe_ctx->stream_res.tg, NULL);
1054 pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
/* only log/detach when this pipe_ctx lives in the current state */
1057 for (i = 0; i < dc->res_pool->pipe_count; i++)
1058 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
1061 if (i == dc->res_pool->pipe_count)
1064 pipe_ctx->stream = NULL;
1065 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
1066 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
/*
 * Underflow recovery workaround: blank every HUBP, soft-reset DCHUBBUB,
 * toggle HUBP_DISABLE and unblank again. Gated by debug.recovery_enabled.
 */
static bool dcn10_hw_wa_force_recovery(struct dc *dc)
	/* NOTE(review): initialized true, so the per-pipe underflow scan below
	 * never prevents recovery — presumably intentional; confirm. */
	bool need_recover = true;

	/* Recovery is opt-in via a debug option. */
	if (!dc->debug.recovery_enabled)

	/* Scan all pipes for an underflow indication. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
					/* one pipe underflow, we will reset all the pipes*/
					need_recover = true;

	/*
	 * Register sequence applied below:
	DCHUBP_CNTL:HUBP_BLANK_EN=1
	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
	DCHUBP_CNTL:HUBP_DISABLE=1
	DCHUBP_CNTL:HUBP_DISABLE=0
	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
	DCSURF_PRIMARY_SURFACE_ADDRESS
	DCHUBP_CNTL:HUBP_BLANK_EN=0
	 */

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, true);

	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
	hubbub1_soft_reset(dc->res_pool->hubbub, true);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, true);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
			/* NOTE(review): comment says HUBP_DISABLE=0 but the call
			 * passes true — verify against hubp_disable_control()
			 * argument semantics. */
			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, true);

	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
	hubbub1_soft_reset(dc->res_pool->hubbub, false);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx =
			&dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe_ctx != NULL) {
			hubp = pipe_ctx->plane_res.hubp;
			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
			/* NOTE(review): comment says BLANK_EN=0 (unblank) but
			 * true is passed — confirm intended behavior. */
			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, true);
/*
 * Sanity check that DCHUBBUB reports "allow p-state change" high. On
 * failure, optionally dump HW state, break to the debugger, and attempt the
 * forced underflow recovery workaround before re-checking.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	/* Nothing to verify if the HUBBUB does not implement the check. */
	if (!hubbub->funcs->verify_allow_pstate_change_high)

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();

		/* Apply the recovery workaround and verify once more. */
		if (dcn10_hw_wa_force_recovery(dc)) {
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
/* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	/* Find the MPCC that blends this plane's DPP output. */
	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/* Nothing to disconnect if this DPP has no MPCC in the tree. */
	if (mpcc_to_remove == NULL)

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	/* Force a full optimization pass after the disconnect completes. */
	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
/**
 * dcn10_plane_atomic_power_down - Power down plane components.
 *
 * @dc: dc struct reference. used for grab hwseq.
 * @dpp: dpp struct reference.
 * @hubp: hubp struct reference.
 *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests for switch power gate are precisely controlled to avoid
 * problems. For this reason, power gate request is usually disabled. This
 * function first needs to enable the power gate request before disabling DPP
 * and HUBP. Finally, it disables the power gate request again.
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the power-gate request window. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
		/* Reset the DPP before the gate request window closes. */
		dpp->funcs->dpp_reset(dpp);
		REG_SET(DC_IP_REQUEST_CNTL, 0,
			"Power gated front end %d\n", hubp->inst);
/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* The MPCC disconnect requested earlier must finish first. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only when no MPCC is attached anymore. */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Clear the pipe context now that its resources are powered down. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
/* Disable a plane's front end (atomic disable + DEGVIDCN10-253 workaround).
 * No-op when the HUBP is absent or already power gated. */
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)

	hws->funcs.plane_atomic_disable(dc, pipe_ctx);

	/* HW workaround applied whenever a front end powers down. */
	apply_DEGVIDCN10_253_wa(dc);

	DC_LOG_DC("Power down front end %d\n",
					pipe_ctx->pipe_idx);
/*
 * Bring all pipes into a known disabled state at init, except pipes kept
 * alive for seamless boot. Blanks enabled TGs, resets DET sizes and MPC
 * muxes, disconnects and disables every plane, then power-gates DSCs that
 * are not feeding the currently running OPTC.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	/* Seamless boot applies when any stream requests the optimization. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		/* Blank controller using driver code instead of
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Build a minimal pipe_ctx so disconnect/disable can run. */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);

		tg->funcs->tg_init(tg);

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obatin real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			/* Skip the DSC still clocked and driving the live OPP. */
			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
/*
 * One-time hardware init at driver load / resume: clocks, DCCG, VGA
 * disable, BIOS golden init, reference-clock discovery, link encoders,
 * pipe init, audio, backlight/ABM/DMCU, and clock gating.
 */
void dcn10_init_hw(struct dc *dc)
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	/* FPGA emulation path: minimal register init, no VBIOS. */
	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	hws->funcs.bios_golden_init(dc);

	/* Derive reference clocks from VBIOS firmware info when available. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);

				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;

		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;

	/* we want to turn off all dp displays before doing detection */
	dc_link_blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			/* Panel control provides the saved backlight level. */
			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);

			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */
void dcn10_power_down_on_boot(struct dc *dc)
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;

	get_edp_links(dc, edp_links, &edp_num);

		edp_link = edp_links[0];

	/* eDP with an active DIG: back off backlight, power down, cut power. */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwss.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);

		/* Otherwise power down on the first non-eDP link with DIG on. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwss.power_down) {
				dc->hwss.power_down(dc);

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
/*
 * Reset back ends for pipes that are going away or need reprogramming in
 * the new state. Iterates top pipes in reverse so parents reset last.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc_state *context)
	struct dce_hwseq *hws = dc->hwseq;

	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Skip pipes with no current stream. */
		if (!pipe_ctx_old->stream)

		/* Only top pipes own the shared back end. */
		if (pipe_ctx_old->top_pipe)

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
				old_clk->funcs->cs_power_down(old_clk);
/*
 * For side-by-side / top-and-bottom stereo, the secondary split pipe must
 * scan out the right eye: temporarily swap the right address into the left
 * slot, saving the original in *addr so the caller can restore it.
 * Returns true (in the original) when a swap was performed.
 */
static bool patch_address_for_sbs_tb_stereo(
		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	/* Secondary split pipe shares the plane_state with its top pipe. */
	bool sec_split = pipe_ctx->top_pipe &&
			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;

	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
		(pipe_ctx->stream->timing.timing_3d_format ==
		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
		 pipe_ctx->stream->timing.timing_3d_format ==
		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
		/* Save the original left address, then present the right eye. */
		*addr = plane_state->address.grph_stereo.left_addr;
		plane_state->address.grph_stereo.left_addr =
		plane_state->address.grph_stereo.right_addr;
	/* 3D view with a mono surface: mirror left into right. */
	if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
		plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
		plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
		plane_state->address.grph_stereo.right_addr =
		plane_state->address.grph_stereo.left_addr;
		plane_state->address.grph_stereo.right_meta_addr =
		plane_state->address.grph_stereo.left_meta_addr;
/* Program the plane's surface address into the HUBP and update flip
 * status; handles the SBS/TB stereo address patch for split pipes. */
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)

	/* May temporarily swap stereo addresses; saved into addr. */
	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	/* Immediate flips take effect right away; record as current. */
	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	/* Restore the left address saved by the stereo patch above. */
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
/* Program the DPP input LUT and degamma for a plane's input transfer
 * function. Returns true (in the original) on success. */
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
			const struct dc_plane_state *plane_state)
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;

	if (dpp_base == NULL)

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;

	/* Program the input LUT when a usable, non-identity ramp exists. */
	if (plane_state->gamma_correction &&
		!dpp_base->ctx->dc->debug.always_use_regamma
		&& !plane_state->gamma_correction->is_identity
		&& dce_use_lut(plane_state->format))
		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);

		/* No transfer function provided: bypass degamma. */
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	else if (tf->type == TF_TYPE_PREDEFINED) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
		case TRANSFER_FUNCTION_PQ:
			/* No HW preset for PQ: build a user PWL and program it. */
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
	} else if (tf->type == TF_TYPE_BYPASS) {
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
		/* Distributed-points curve: translate to degamma PWL. */
		cm_helper_translate_curve_to_degamma_hw_format(tf,
					&dpp_base->degamma_params);
		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
					&dpp_base->degamma_params);
/* Total number of points in a transfer-function LUT (hw + extra). */
#define MAX_NUM_HW_POINTS 0x200

/* Log a transfer function's red/green/blue points for debugging. */
static void log_tf(struct dc_context *ctx,
		struct dc_transfer_func *tf, uint32_t hw_points_num)
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf

	DC_LOGGER_INIT(ctx->logger);
	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* Hardware points: red at default verbosity, G/B only when verbose. */
	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);

	/* Remaining points beyond hw_points_num: verbose logging only. */
	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/* Program the DPP regamma (output transfer function) for a stream.
 * Returns true (in the original) on success. */
bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;

	/* Predefined sRGB uses the hardware preset directly. */
	if (stream->out_transfer_func &&
			stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
			stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);

	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
	 */
	else if (cm_helper_translate_curve_to_hw_format(
			stream->out_transfer_func,
			&dpp->regamma_params, false)) {
		dpp->funcs->dpp_program_regamma_pwl(
				&dpp->regamma_params, OPP_REGAMMA_USER);
		/* Translation failed or no curve: bypass regamma. */
		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);

	if (stream != NULL && stream->ctx != NULL &&
			stream->out_transfer_func != NULL) {
			stream->out_transfer_func,
			dpp->regamma_params.hw_points_num);
/* Lock or unlock the TG master update lock for a pipe's timing generator. */
void dcn10_pipe_control_lock(
	struct pipe_ctx *pipe,
	struct dce_hwseq *hws = dc->hwseq;

	/* use TG master update lock to lock everything on the TG
	 * therefore only top pipe need to lock
	 */
	if (!pipe || pipe->top_pipe)

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
/**
 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
 *
 * Software keepout workaround to prevent cursor update locking from stalling
 * out cursor updates indefinitely or from old values from being retained in
 * the case where the viewport changes in the same frame as the cursor.
 *
 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
 * too close to VUPDATE, then stall out until VUPDATE finishes.
 *
 * TODO: Optimize cursor programming to be once per frame before VUPDATE
 * to avoid the need for this workaround.
 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Both hooks are required to compute the keepout window. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;

	/* Calculate time until VUPDATE in microseconds. */
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
/* Lock/unlock cursor updates for a stream; routes through the DMUB HW lock
 * manager when the link supports it, otherwise the MPC cursor lock. */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
	if (!pipe || pipe->top_pipe)

	/* Prevent cursor lock from stalling out cursor updates. */
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
		/* Fallback: MPC-level cursor lock keyed by OPP instance. */
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
/* Poll for a triggered TG reset, waiting at most ten frames; logs an error
 * on timeout or if the TG counter stops moving. */
static bool wait_for_reset_trigger_to_occur(
	struct dc_context *dc_ctx,
	struct timing_generator *tg)

	/* To avoid endless loop we wait at most
	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
	const uint32_t frames_to_wait_on_triggered_reset = 10;

	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {

		if (!tg->funcs->is_counter_moving(tg)) {
			DC_ERROR("TG counter is not moving!\n");

		if (tg->funcs->did_triggered_reset_occur(tg)) {
			/* usually occurs at i=1 */
			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",

		/* Wait for one frame. */
		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);

	DC_ERROR("GSL: Timeout on reset trigger!\n");
/*
 * Reduce a fraction by dividing out common prime factors (primes < 1000).
 * When checkUint32Bounary is set, succeed once both values fit in 32 bits;
 * when it is false, the result is trivially successful after reduction.
 */
static uint64_t reduceSizeAndFraction(uint64_t *numerator,
		uint64_t *denominator,
		bool checkUint32Bounary)
	/* Trivially successful when no 32-bit bound check is requested. */
	bool ret = checkUint32Bounary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;
		/* Stop early once both values fit in 32 bits. */
		if (checkUint32Bounary &&
			num <= max_int32 && denom <= max_int32) {
			/* Divide out prime_numbers[i] while both divide evenly
			 * (64-bit division via kernel div_u64_rem). */
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				denom = denom_result;
		} while (num_remainder == 0 && denom_remainder == 0);

	*denominator = denom;
2084 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2086 uint32_t master_pipe_refresh_rate =
2087 pipe->stream->timing.pix_clk_100hz * 100 /
2088 pipe->stream->timing.h_total /
2089 pipe->stream->timing.v_total;
2090 return master_pipe_refresh_rate <= 30;
2093 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2094 bool account_low_refresh_rate)
2096 uint32_t clock_divider = 1;
2097 uint32_t numpipes = 1;
2099 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2102 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2105 while (pipe->next_odm_pipe) {
2106 pipe = pipe->next_odm_pipe;
2109 clock_divider *= numpipes;
2111 return clock_divider;
/*
 * Align the DP DTO (phase/modulo) of every pipe in a vblank-sync group
 * to the embedded (eDP) panel's pixel clock so their vblanks can be
 * synchronized.  Phase/modulo parameters come either directly from
 * dc->config.vblank_alignment_dto_params (packed: bits [31:0] pix clk
 * in 100 Hz, [46:32] h_total, [62:48] v_total) or are derived from the
 * embedded timing scaled by each pipe's own h_total*v_total.
 * Returns the index of the master pipe (assignment of 'master' occurs
 * on lines elided from this view — TODO confirm against full file).
 */
2114 static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
2115 struct pipe_ctx *grouped_pipes[])
2117 struct dc_context *dc_ctx = dc->ctx;
2118 int i, master = -1, embedded = -1;
2119 struct dc_crtc_timing *hw_crtc_timing;
2120 uint64_t phase[MAX_PIPES];
2121 uint64_t modulo[MAX_PIPES];
2124 uint32_t embedded_pix_clk_100hz;
2125 uint16_t embedded_h_total;
2126 uint16_t embedded_v_total;
/* DP reference clock, converted from kHz to 100 Hz units. */
2127 uint32_t dp_ref_clk_100hz =
2128 dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
2130 hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
2131 if (!hw_crtc_timing)
/* Only proceed when DTO params are provided and the clock source can override. */
2134 if (dc->config.vblank_alignment_dto_params &&
2135 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
2137 (dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2139 (dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2140 embedded_pix_clk_100hz =
2141 dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
/* Read back actual HW timing and pixel clock for each pipe in the group. */
2143 for (i = 0; i < group_size; i++) {
2144 grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2145 grouped_pipes[i]->stream_res.tg,
2146 &hw_crtc_timing[i]);
2147 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2148 dc->res_pool->dp_clock_source,
2149 grouped_pipes[i]->stream_res.tg->inst,
2151 hw_crtc_timing[i].pix_clk_100hz = pclk;
2152 if (dc_is_embedded_signal(
2153 grouped_pipes[i]->stream->signal)) {
/* Embedded (eDP) pipe keeps its own clock: phase/modulo taken verbatim. */
2156 phase[i] = embedded_pix_clk_100hz*100;
2157 modulo[i] = dp_ref_clk_100hz*100;
/* Non-embedded pipes: scale embedded clock by their own frame size. */
2160 phase[i] = (uint64_t)embedded_pix_clk_100hz*
2161 hw_crtc_timing[i].h_total*
2162 hw_crtc_timing[i].v_total;
2163 phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2164 modulo[i] = (uint64_t)dp_ref_clk_100hz*
/* Reduce phase/modulo so both fit the DTO registers; failure marks the
 * stream non-synchronizable rather than programming bogus values. */
2168 if (reduceSizeAndFraction(&phase[i],
2169 &modulo[i], true) == false) {
2171 * this will help to stop reporting
2172 * this timing synchronizable
2174 DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2175 grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
/* Program the computed DTOs and record the resulting stream pixel clock. */
2180 for (i = 0; i < group_size; i++) {
2181 if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2182 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2183 dc->res_pool->dp_clock_source,
2184 grouped_pipes[i]->stream_res.tg->inst,
2185 phase[i], modulo[i]);
2186 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2187 dc->res_pool->dp_clock_source,
2188 grouped_pipes[i]->stream_res.tg->inst, &pclk);
2189 grouped_pipes[i]->stream->timing.pix_clk_100hz =
2190 pclk*get_clock_divider(grouped_pipes[i], false);
2198 kfree(hw_crtc_timing);
/*
 * Synchronize vblanks across a group of pipes: temporarily stretch each
 * follower's DPG dimensions, align all DP DTO pixel clocks to the master,
 * then have each follower TG align its vblank to the master TG, and
 * finally restore the original DPG dimensions.
 */
2202 void dcn10_enable_vblanks_synchronization(
2206 struct pipe_ctx *grouped_pipes[])
2208 struct dc_context *dc_ctx = dc->ctx;
2209 struct output_pixel_processor *opp;
2210 struct timing_generator *tg;
2211 int i, width, height, master;
/* Temporarily set follower DPG height to 2*h+1 (workaround while aligning
 * — presumably keeps DPG from underflowing during the transition; confirm). */
2213 for (i = 1; i < group_size; i++) {
2214 opp = grouped_pipes[i]->stream_res.opp;
2215 tg = grouped_pipes[i]->stream_res.tg;
2216 tg->funcs->get_otg_active_size(tg, &width, &height);
2218 if (!tg->funcs->is_tg_enabled(tg)) {
2219 DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
2223 if (opp->funcs->opp_program_dpg_dimensions)
2224 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Reset per-stream sync state before re-aligning. */
2227 for (i = 0; i < group_size; i++) {
2228 if (grouped_pipes[i]->stream == NULL)
2230 grouped_pipes[i]->stream->vblank_synchronized = false;
2231 grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2234 DC_SYNC_INFO("Aligning DP DTOs\n");
2236 master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2238 DC_SYNC_INFO("Synchronizing VBlanks\n");
/* Align each follower's vblank to the master TG, skipping streams whose
 * pixel clock could not be synchronized. */
2241 for (i = 0; i < group_size; i++) {
2242 if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2243 grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2244 grouped_pipes[master]->stream_res.tg,
2245 grouped_pipes[i]->stream_res.tg,
2246 grouped_pipes[master]->stream->timing.pix_clk_100hz,
2247 grouped_pipes[i]->stream->timing.pix_clk_100hz,
2248 get_clock_divider(grouped_pipes[master], false),
2249 get_clock_divider(grouped_pipes[i], false));
2250 grouped_pipes[i]->stream->vblank_synchronized = true;
2252 grouped_pipes[master]->stream->vblank_synchronized = true;
2253 DC_SYNC_INFO("Sync complete\n");
/* Restore the real DPG dimensions on the followers. */
2256 for (i = 1; i < group_size; i++) {
2257 opp = grouped_pipes[i]->stream_res.opp;
2258 tg = grouped_pipes[i]->stream_res.tg;
2259 tg->funcs->get_otg_active_size(tg, &width, &height);
2260 if (opp->funcs->opp_program_dpg_dimensions)
2261 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
/*
 * Synchronize timing across a group of pipes via OTG reset triggers:
 * arm each follower TG to reset against pipe 0's TG, wait for the reset
 * to occur, then disarm.  SubVP phantom pipes are excluded throughout.
 * DPG dimensions are stretched/restored around the operation, same as
 * in dcn10_enable_vblanks_synchronization.
 */
2265 void dcn10_enable_timing_synchronization(
2269 struct pipe_ctx *grouped_pipes[])
2271 struct dc_context *dc_ctx = dc->ctx;
2272 struct output_pixel_processor *opp;
2273 struct timing_generator *tg;
2274 int i, width, height;
2276 DC_SYNC_INFO("Setting up OTG reset trigger\n");
2278 for (i = 1; i < group_size; i++) {
2279 if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2282 opp = grouped_pipes[i]->stream_res.opp;
2283 tg = grouped_pipes[i]->stream_res.tg;
2284 tg->funcs->get_otg_active_size(tg, &width, &height);
2286 if (!tg->funcs->is_tg_enabled(tg)) {
2287 DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
2291 if (opp->funcs->opp_program_dpg_dimensions)
2292 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
/* Clear sync status on every (non-phantom) stream before re-syncing. */
2295 for (i = 0; i < group_size; i++) {
2296 if (grouped_pipes[i]->stream == NULL)
2299 if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2302 grouped_pipes[i]->stream->vblank_synchronized = false;
/* Arm each follower to reset against the first pipe's TG instance. */
2305 for (i = 1; i < group_size; i++) {
2306 if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2309 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2310 grouped_pipes[i]->stream_res.tg,
2311 grouped_pipes[0]->stream_res.tg->inst);
2314 DC_SYNC_INFO("Waiting for trigger\n");
2316 /* Need to get only check 1 pipe for having reset as all the others are
2317 * synchronized. Look at last pipe programmed to reset.
2320 if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
2321 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
/* Disarm the reset triggers now that the reset has happened. */
2323 for (i = 1; i < group_size; i++) {
2324 if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2327 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2328 grouped_pipes[i]->stream_res.tg);
/* Restore the real DPG dimensions. */
2331 for (i = 1; i < group_size; i++) {
2332 if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2335 opp = grouped_pipes[i]->stream_res.opp;
2336 tg = grouped_pipes[i]->stream_res.tg;
2337 tg->funcs->get_otg_active_size(tg, &width, &height);
2338 if (opp->funcs->opp_program_dpg_dimensions)
2339 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2342 DC_SYNC_INFO("Sync complete\n");
/*
 * Enable per-frame CRTC position reset on every pipe in the group (when
 * the TG supports it), then wait for each pipe's reset trigger to fire.
 * Used for multi-display synchronization.
 */
2345 void dcn10_enable_per_frame_crtc_position_reset(
2348 struct pipe_ctx *grouped_pipes[])
2350 struct dc_context *dc_ctx = dc->ctx;
2353 DC_SYNC_INFO("Setting up\n");
2354 for (i = 0; i < group_size; i++)
2355 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2356 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2357 grouped_pipes[i]->stream_res.tg,
2359 &grouped_pipes[i]->stream->triggered_crtc_reset);
2361 DC_SYNC_INFO("Waiting for trigger\n");
2363 for (i = 0; i < group_size; i++)
2364 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2366 DC_SYNC_INFO("Multi-display sync is complete\n");
/*
 * Read the MMHUB system-aperture registers and fill @apt with the
 * default physical address and the low/high logical aperture bounds.
 * The page number is shifted by 12 (4 KiB pages); the logical addresses
 * are shifted by 18 — presumably the register stores 256 KiB granularity
 * (TODO confirm against register spec).
 */
2369 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2370 struct vm_system_aperture_param *apt,
2371 struct dce_hwseq *hws)
2373 PHYSICAL_ADDRESS_LOC physical_page_number;
2374 uint32_t logical_addr_low;
2375 uint32_t logical_addr_high;
2377 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2378 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2379 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2380 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2382 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2383 LOGICAL_ADDR, &logical_addr_low);
2385 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2386 LOGICAL_ADDR, &logical_addr_high);
2388 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
2389 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
2390 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
2393 /* Temporary read settings, future will get values from kmd directly */
/*
 * Populate @vm0 with VM context 0 page-table settings read back from
 * registers: PTE base, start/end logical page numbers, and the fault
 * default address.  The PTE base is then rebased from UMA space into
 * DCN's view using the SDPIF FB base/offset (see comment below).
 */
2394 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2395 struct vm_context0_param *vm0,
2396 struct dce_hwseq *hws)
2398 PHYSICAL_ADDRESS_LOC fb_base;
2399 PHYSICAL_ADDRESS_LOC fb_offset;
2400 uint32_t fb_base_value;
2401 uint32_t fb_offset_value;
2403 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2404 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2406 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2407 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2408 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2409 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2411 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2412 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2413 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2414 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2416 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2417 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2418 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2419 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2421 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2422 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2423 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2424 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2427 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2428 * Therefore we need to do
2429 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2430 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
/* FB base/offset registers are in 16 MiB units, hence the << 24. */
2432 fb_base.quad_part = (uint64_t)fb_base_value << 24;
2433 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2434 vm0->pte_base.quad_part += fb_base.quad_part;
2435 vm0->pte_base.quad_part -= fb_offset.quad_part;
/*
 * Program a HUBP's VM page-table-entry settings: read the current
 * system aperture and VM context 0 configuration from MMHUB registers,
 * then push both into the HUBP.
 */
2439 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2441 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2442 struct vm_system_aperture_param apt = {0};
2443 struct vm_context0_param vm0 = {0};
2445 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2446 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2448 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2449 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
/*
 * Power up and enable a plane's pipe resources: undo the DEGVIDCN10_253
 * workaround, power on the HUBP domain, enable its DCFCLK and the OPP
 * pipe clock, program VM PTE settings when GPU VM is supported, and
 * re-enable the flip interrupt for a top pipe.  Sanity checks bracket
 * the sequence when enabled in debug options.
 */
2452 static void dcn10_enable_plane(
2454 struct pipe_ctx *pipe_ctx,
2455 struct dc_state *context)
2457 struct dce_hwseq *hws = dc->hwseq;
2459 if (dc->debug.sanity_checks) {
2460 hws->funcs.verify_allow_pstate_change_high(dc);
2463 undo_DEGVIDCN10_253_wa(dc);
2465 power_on_plane(dc->hwseq,
2466 pipe_ctx->plane_res.hubp->inst);
2468 /* enable DCFCLK current DCHUB */
2469 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2471 /* make sure OPP_PIPE_CLOCK_EN = 1 */
2472 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2473 pipe_ctx->stream_res.opp,
2476 if (dc->config.gpu_vm_support)
2477 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2479 if (dc->debug.sanity_checks) {
2480 hws->funcs.verify_allow_pstate_change_high(dc);
/* Only a top pipe owns the flip interrupt; re-arm it if the plane asked. */
2483 if (!pipe_ctx->top_pipe
2484 && pipe_ctx->plane_state
2485 && pipe_ctx->plane_state->flip_int_enabled
2486 && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2487 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
/*
 * Program the DPP gamut remap (CSC temperature) matrix.  Stream-level
 * remap takes priority over plane-level remap; when neither is enabled
 * the adjustment type stays BYPASS.
 */
2491 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2494 struct dpp_grph_csc_adjustment adjust;
2495 memset(&adjust, 0, sizeof(adjust));
2496 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2499 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2500 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2501 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2502 adjust.temperature_matrix[i] =
2503 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2504 } else if (pipe_ctx->plane_state &&
2505 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2506 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2507 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2508 adjust.temperature_matrix[i] =
2509 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2512 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
/*
 * Return true when the rear-plane MPO brightness fix is needed: this
 * pipe carries a rear plane (layer_index > 0) in an RGB colorspace and
 * the top-most pipe in the MPO chain holds a visible front plane
 * (layer_index == 0).
 */
2516 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2518 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2519 if (pipe_ctx->top_pipe) {
2520 struct pipe_ctx *top = pipe_ctx->top_pipe;
2522 while (top->top_pipe)
2523 top = top->top_pipe; // Traverse to top pipe_ctx
2524 if (top->plane_state && top->plane_state->layer_index == 0)
2525 return true; // Front MPO plane not hidden
/*
 * Apply the rear-plane MPO brightness fix: program the OCSC with the
 * RGB bias entries zeroed (the zeroing statements fall on lines elided
 * from this view — confirm against full file), then restore the caller's
 * matrix[3/7/11] so @matrix is returned unmodified.
 */
2531 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2533 // Override rear plane RGB bias to fix MPO brightness
2534 uint16_t rgb_bias = matrix[3];
2539 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2540 matrix[3] = rgb_bias;
2541 matrix[7] = rgb_bias;
2542 matrix[11] = rgb_bias;
/*
 * Program the output CSC: when a stream adjustment matrix is enabled,
 * apply it (routing through the rear-plane MPO brightness fix when a
 * positive RGB bias would be double-counted by MPC blending); otherwise
 * fall back to the default CSC for the given colorspace.
 */
2545 void dcn10_program_output_csc(struct dc *dc,
2546 struct pipe_ctx *pipe_ctx,
2547 enum dc_color_space colorspace,
2551 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2552 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2554 /* MPO is broken with RGB colorspaces when OCSC matrix
2555 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2556 * Blending adds offsets from front + rear to rear plane
2558 * Fix is to set RGB bias to 0 on rear plane, top plane
2559 * black value pixels add offset instead of rear + front
2562 int16_t rgb_bias = matrix[3];
2563 // matrix[3/7/11] are all the same offset value
2565 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2566 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2568 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2572 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2573 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
/*
 * Reprogram the DPP for a plane: set up the input CSC from the plane's
 * format/colorspace, then program prescale bias-and-scale (when the DPP
 * supports it).
 */
2577 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2579 struct dc_bias_and_scale bns_params = {0};
2581 // program the input csc
2582 dpp->funcs->dpp_setup(dpp,
2583 plane_state->format,
2584 EXPANSION_MODE_ZERO,
2585 plane_state->input_csc_color_matrix,
2586 plane_state->color_space,
2589 //set scale and bias registers
2590 build_prescale_params(&bns_params, plane_state);
2591 if (dpp->funcs->dpp_program_bias_and_scale)
2592 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
/*
 * Select the visual-confirm debug color for a pipe (HDR / surface /
 * swizzle modes) — falling back to the stream's black color otherwise —
 * and program it as the MPCC background color when supported.
 */
2595 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2597 struct mpc *mpc = dc->res_pool->mpc;
2599 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2600 get_hdr_visual_confirm_color(pipe_ctx, color);
2601 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2602 get_surface_visual_confirm_color(pipe_ctx, color);
2603 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2604 get_surface_tile_visual_confirm_color(pipe_ctx, color);
2606 color_space_to_black_color(
2607 dc, pipe_ctx->stream->output_color_space, color);
2609 if (mpc->funcs->set_bg_color) {
2610 memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
2611 mpc->funcs->set_bg_color(mpc, color, mpcc_id);
/*
 * Build the MPCC blending configuration for a pipe (alpha mode, global
 * gain/alpha, pre-multiplied alpha) and either update the existing MPCC
 * in place (non-full update) or detach and re-insert the plane into the
 * MPC tree (full update).
 */
2615 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2617 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2618 struct mpcc_blnd_cfg blnd_cfg = {0};
2619 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2621 struct mpcc *new_mpcc;
2622 struct mpc *mpc = dc->res_pool->mpc;
2623 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2625 blnd_cfg.overlap_only = false;
2626 blnd_cfg.global_gain = 0xff;
2628 if (per_pixel_alpha) {
2629 /* DCN1.0 has output CM before MPC which seems to screw with
2630 * pre-multiplied alpha.
/* Pre-multiplied alpha is only honored for RGB output colorspaces. */
2632 blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
2633 pipe_ctx->stream->output_color_space)
2634 && pipe_ctx->plane_state->pre_multiplied_alpha);
2635 if (pipe_ctx->plane_state->global_alpha) {
2636 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
2637 blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
2639 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2642 blnd_cfg.pre_multiplied_alpha = false;
2643 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2646 if (pipe_ctx->plane_state->global_alpha)
2647 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2649 blnd_cfg.global_alpha = 0xff;
2653 * Note: currently there is a bug in init_hw such that
2654 * on resume from hibernate, BIOS sets up MPCC0, and
2655 * we do mpcc_remove but the mpcc cannot go to idle
2656 * after remove. This cause us to pick mpcc1 here,
2657 * which causes a pstate hang for yet unknown reason.
2659 mpcc_id = hubp->inst;
2661 /* If there is no full update, don't need to touch MPC tree*/
2662 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2663 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2664 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2668 /* check if this MPCC is already being used */
2669 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2670 /* remove MPCC if being used */
2671 if (new_mpcc != NULL)
2672 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2674 if (dc->debug.sanity_checks)
2675 mpc->funcs->assert_mpcc_idle_before_connect(
2676 dc->res_pool->mpc, mpcc_id);
2678 /* Call MPC to insert new plane */
2679 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2686 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2688 ASSERT(new_mpcc != NULL);
/* Record which OPP/MPCC this HUBP now feeds. */
2689 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2690 hubp->mpcc_id = mpcc_id;
/*
 * Push the pipe's precomputed scaler data to the DPP, enabling the
 * line-buffer alpha path only when per-pixel alpha blending is in use.
 */
2693 static void update_scaler(struct pipe_ctx *pipe_ctx)
2695 bool per_pixel_alpha =
2696 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2698 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2699 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2700 /* scaler configuration */
2701 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2702 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
/*
 * Reprogram DCHUBP and DPP for a plane according to its update flags:
 * DPP clock divider and DTO on a full update, HUBP DLG/TTU/VTG setup,
 * input CSC and bias/scale, MPCC blending, scaler, viewport, cursor,
 * gamut remap and output CSC, and finally the surface configuration and
 * plane address.  Unblanks the HUBP if the pipe tree is visible.
 */
2705 static void dcn10_update_dchubp_dpp(
2707 struct pipe_ctx *pipe_ctx,
2708 struct dc_state *context)
2710 struct dce_hwseq *hws = dc->hwseq;
2711 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2712 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2713 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2714 struct plane_size size = plane_state->plane_size;
2715 unsigned int compat_level = 0;
2716 bool should_divided_by_2 = false;
2718 /* depends on DML calculation, DPP clock value may change dynamically */
2719 /* If request max dpp clk is lower than current dispclk, no need to
2722 if (plane_state->update_flags.bits.full_update) {
2724 /* new calculated dispclk, dppclk are stored in
2725 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2726 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2727 * dcn10_validate_bandwidth compute new dispclk, dppclk.
2728 * dispclk will put in use after optimize_bandwidth when
2729 * ramp_up_dispclk_with_dpp is called.
2730 * there are two places for dppclk be put in use. One location
2731 * is the same as the location as dispclk. Another is within
2732 * update_dchubp_dpp which happens between pre_bandwidth and
2733 * optimize_bandwidth.
2734 * dppclk updated within update_dchubp_dpp will cause new
2735 * clock values of dispclk and dppclk not be in use at the same
2736 * time. when clocks are decreased, this may cause dppclk is
2737 * lower than previous configuration and let pipe stuck.
2738 * for example, eDP + external dp, change resolution of DP from
2739 * 1920x1080x144hz to 1280x960x60hz.
2740 * before change: dispclk = 337889 dppclk = 337889
2741 * change mode, dcn10_validate_bandwidth calculate
2742 * dispclk = 143122 dppclk = 143122
2743 * update_dchubp_dpp be executed before dispclk be updated,
2744 * dispclk = 337889, but dppclk use new value dispclk /2 =
2745 * 168944. this will cause pipe pstate warning issue.
2746 * solution: between pre_bandwidth and optimize_bandwidth, while
2747 * dispclk is going to be decreased, keep dppclk = dispclk
2749 if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2750 dc->clk_mgr->clks.dispclk_khz)
2751 should_divided_by_2 = false;
2753 should_divided_by_2 =
2754 context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2755 dc->clk_mgr->clks.dispclk_khz / 2;
2757 dpp->funcs->dpp_dppclk_control(
2759 should_divided_by_2,
/* Update the DPP DTO when a DCCG exists (later DCN resource pools). */
2762 if (dc->res_pool->dccg)
2763 dc->res_pool->dccg->funcs->update_dpp_dto(
2766 pipe_ctx->plane_res.bw.dppclk_khz);
2768 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2769 dc->clk_mgr->clks.dispclk_khz / 2 :
2770 dc->clk_mgr->clks.dispclk_khz;
2773 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2774 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2775 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2777 if (plane_state->update_flags.bits.full_update) {
2778 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2780 hubp->funcs->hubp_setup(
2782 &pipe_ctx->dlg_regs,
2783 &pipe_ctx->ttu_regs,
2785 &pipe_ctx->pipe_dlg_param);
2786 hubp->funcs->hubp_setup_interdependent(
2788 &pipe_ctx->dlg_regs,
2789 &pipe_ctx->ttu_regs);
/* Surface size for HUBP programming comes from the scaled viewport. */
2792 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2794 if (plane_state->update_flags.bits.full_update ||
2795 plane_state->update_flags.bits.bpp_change)
2796 dcn10_update_dpp(dpp, plane_state);
2798 if (plane_state->update_flags.bits.full_update ||
2799 plane_state->update_flags.bits.per_pixel_alpha_change ||
2800 plane_state->update_flags.bits.global_alpha_change)
2801 hws->funcs.update_mpcc(dc, pipe_ctx);
2803 if (plane_state->update_flags.bits.full_update ||
2804 plane_state->update_flags.bits.per_pixel_alpha_change ||
2805 plane_state->update_flags.bits.global_alpha_change ||
2806 plane_state->update_flags.bits.scaling_change ||
2807 plane_state->update_flags.bits.position_change) {
2808 update_scaler(pipe_ctx);
2811 if (plane_state->update_flags.bits.full_update ||
2812 plane_state->update_flags.bits.scaling_change ||
2813 plane_state->update_flags.bits.position_change) {
2814 hubp->funcs->mem_program_viewport(
2816 &pipe_ctx->plane_res.scl_data.viewport,
2817 &pipe_ctx->plane_res.scl_data.viewport_c);
/* Re-apply cursor state whenever a cursor surface is attached. */
2820 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2821 dc->hwss.set_cursor_position(pipe_ctx);
2822 dc->hwss.set_cursor_attribute(pipe_ctx);
2824 if (dc->hwss.set_cursor_sdr_white_level)
2825 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2828 if (plane_state->update_flags.bits.full_update) {
2830 dc->hwss.program_gamut_remap(pipe_ctx);
2832 dc->hwss.program_output_csc(dc,
2834 pipe_ctx->stream->output_color_space,
2835 pipe_ctx->stream->csc_color_matrix.matrix,
2836 pipe_ctx->stream_res.opp->inst);
2839 if (plane_state->update_flags.bits.full_update ||
2840 plane_state->update_flags.bits.pixel_format_change ||
2841 plane_state->update_flags.bits.horizontal_mirror_change ||
2842 plane_state->update_flags.bits.rotation_change ||
2843 plane_state->update_flags.bits.swizzle_change ||
2844 plane_state->update_flags.bits.dcc_change ||
2845 plane_state->update_flags.bits.bpp_change ||
2846 plane_state->update_flags.bits.scaling_change ||
2847 plane_state->update_flags.bits.plane_size_change) {
2848 hubp->funcs->hubp_program_surface_config(
2850 plane_state->format,
2851 &plane_state->tiling_info,
2853 plane_state->rotation,
2855 plane_state->horizontal_mirror,
2859 hubp->power_gated = false;
2861 hws->funcs.update_plane_addr(dc, pipe_ctx);
2863 if (is_pipe_tree_visible(pipe_ctx))
2864 hubp->funcs->set_blank(hubp, false);
/*
 * Blank or unblank a pipe's pixel data: program the OTG blank color for
 * the stream's colorspace, then blank/unblank the TG.  On blank the ABM
 * is immediately disabled and we wait for VBLANK before blanking; on
 * unblank the ABM level is restored (control flow around blank/unblank
 * branches includes lines elided from this view — confirm ordering
 * against the full file).
 */
2867 void dcn10_blank_pixel_data(
2869 struct pipe_ctx *pipe_ctx,
2872 enum dc_color_space color_space;
2873 struct tg_color black_color = {0};
2874 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2875 struct dc_stream_state *stream = pipe_ctx->stream;
2877 /* program otg blank color */
2878 color_space = stream->output_color_space;
2879 color_space_to_black_color(dc, color_space, &black_color);
2882 * The way 420 is packed, 2 channels carry Y component, 1 channel
2883 * alternate between Cb and Cr, so both channels need the pixel
2886 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2887 black_color.color_r_cr = black_color.color_g_y;
2890 if (stream_res->tg->funcs->set_blank_color)
2891 stream_res->tg->funcs->set_blank_color(
2896 if (stream_res->tg->funcs->set_blank)
2897 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2898 if (stream_res->abm) {
2899 dc->hwss.set_pipe(pipe_ctx);
2900 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2903 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2904 if (stream_res->tg->funcs->set_blank) {
2905 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2906 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
/*
 * Program the DPP HDR multiplier from the plane's fixed-point hdr_mult.
 * A zero multiplier is left at the 1.0 hardware default (0x1f000 in the
 * 6-bit-exponent / 12-bit-mantissa custom float format).
 */
2911 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2913 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2914 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2915 struct custom_float_format fmt;
2917 fmt.exponenta_bits = 6;
2918 fmt.mantissa_bits = 12;
2922 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2923 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2925 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2926 pipe_ctx->plane_res.dpp, hw_mult);
/*
 * Program one pipe for the new state.  For a top pipe: global sync /
 * VTG / vupdate interrupt, and blank pixel data when the tree is not
 * visible.  Then, gated by update flags: enable the plane, update
 * HUBP/DPP, HDR multiplier, and input/output transfer functions.
 */
2929 void dcn10_program_pipe(
2931 struct pipe_ctx *pipe_ctx,
2932 struct dc_state *context)
2934 struct dce_hwseq *hws = dc->hwseq;
2936 if (pipe_ctx->top_pipe == NULL) {
2937 bool blank = !is_pipe_tree_visible(pipe_ctx);
2939 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2940 pipe_ctx->stream_res.tg,
2941 calculate_vready_offset_for_group(pipe_ctx),
2942 pipe_ctx->pipe_dlg_param.vstartup_start,
2943 pipe_ctx->pipe_dlg_param.vupdate_offset,
2944 pipe_ctx->pipe_dlg_param.vupdate_width);
2946 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2947 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2949 if (hws->funcs.setup_vupdate_interrupt)
2950 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2952 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
/* Full updates power up/enable the plane before HUBP/DPP programming. */
2955 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2956 dcn10_enable_plane(dc, pipe_ctx, context);
2958 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2960 hws->funcs.set_hdr_multiplier(pipe_ctx);
2962 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2963 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2964 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2965 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2967 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2968 * only do gamma programming for full update.
2969 * TODO: This can be further optimized/cleaned up
2970 * Always call this for now since it does memcmp inside before
2971 * doing heavy calculation and programming
2973 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2974 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/*
 * For each enabled top pipe with a plane, wait through VBLANK and then
 * VACTIVE so a VUPDATE is guaranteed to have occurred before the caller
 * proceeds (e.g. before re-locking the pipe).
 */
2977 void dcn10_wait_for_pending_cleared(struct dc *dc,
2978 struct dc_state *context)
2980 struct pipe_ctx *pipe_ctx;
2981 struct timing_generator *tg;
2984 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2985 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2986 tg = pipe_ctx->stream_res.tg;
2989 * Only wait for top pipe's tg penindg bit
2990 * Also skip if pipe is disabled.
2992 if (pipe_ctx->top_pipe ||
2993 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2994 !tg->funcs->is_tg_enabled(tg))
2998 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2999 * For some reason waiting for OTG_UPDATE_PENDING cleared
3000 * seems to not trigger the update right away, and if we
3001 * lock again before VUPDATE then we don't get a separated
3004 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3005 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
/*
 * Post-unlock front-end programming: apply the false-OPTC-underflow
 * workaround on plane-less top/non-ODM pipes, disable pipes flagged for
 * removal, optimize bandwidth once any pipe was disabled, and apply the
 * DEGVIDCN10_254 watermark-change workaround when required.
 */
3009 void dcn10_post_unlock_program_front_end(
3011 struct dc_state *context)
3015 DC_LOGGER_INIT(dc->ctx->logger);
3017 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3018 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3020 if (!pipe_ctx->top_pipe &&
3021 !pipe_ctx->prev_odm_pipe &&
3023 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3025 if (context->stream_status[i].plane_count == 0)
3026 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
/* Tear down pipes that the new context marks disabled. */
3030 for (i = 0; i < dc->res_pool->pipe_count; i++)
3031 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3032 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/* One optimize_bandwidth call suffices; the loop body exits after it
 * (break falls on a line elided from this view — confirm). */
3034 for (i = 0; i < dc->res_pool->pipe_count; i++)
3035 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3036 dc->hwss.optimize_bandwidth(dc, context);
3040 if (dc->hwseq->wa.DEGVIDCN10_254)
3041 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
/*
 * Workaround for HW frame-packed stereo: when any stream uses
 * TIMING_3D_FORMAT_HW_FRAME_PACKING, disable HUBBUB self-refresh
 * control (stereo and self-refresh are incompatible here).
 */
3044 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3048 for (i = 0; i < context->stream_count; i++) {
3049 if (context->streams[i]->timing.timing_3d_format
3050 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3054 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
/*
 * Pre-programming bandwidth step: raise clocks for the new context
 * (safe_to_lower = false path — presumed; confirm update_clocks args on
 * elided lines), program HUBBUB watermarks, apply the stereo frame-pack
 * workaround, and optionally notify pplib of watermark ranges.
 */
3060 void dcn10_prepare_bandwidth(
3062 struct dc_state *context)
3064 struct dce_hwseq *hws = dc->hwseq;
3065 struct hubbub *hubbub = dc->res_pool->hubbub;
3066 int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3068 if (dc->debug.sanity_checks)
3069 hws->funcs.verify_allow_pstate_change_high(dc);
3071 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3072 if (context->stream_count == 0)
3073 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3075 dc->clk_mgr->funcs->update_clocks(
/* Watermark programming result feeds the wm_optimized_required flag so
 * optimize_bandwidth knows whether a second pass is needed. */
3081 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3082 &context->bw_ctx.bw.dcn.watermarks,
3083 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3085 dcn10_stereo_hw_frame_pack_wa(dc, context);
3087 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3090 dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3092 dcn_bw_notify_pplib_of_wm_ranges(
3093 dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3096 if (dc->debug.sanity_checks)
3097 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Post-programming bandwidth step: mirror of dcn10_prepare_bandwidth
 * that lowers clocks once the new context is live, reprograms HUBBUB
 * watermarks, applies the stereo frame-pack workaround, and optionally
 * notifies pplib of watermark ranges.
 */
3100 void dcn10_optimize_bandwidth(
3102 struct dc_state *context)
3104 struct dce_hwseq *hws = dc->hwseq;
3105 struct hubbub *hubbub = dc->res_pool->hubbub;
3106 int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3108 if (dc->debug.sanity_checks)
3109 hws->funcs.verify_allow_pstate_change_high(dc);
3111 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3112 if (context->stream_count == 0)
3113 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3115 dc->clk_mgr->funcs->update_clocks(
3121 hubbub->funcs->program_watermarks(hubbub,
3122 &context->bw_ctx.bw.dcn.watermarks,
3123 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3126 dcn10_stereo_hw_frame_pack_wa(dc, context);
3128 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3131 dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3133 dcn_bw_notify_pplib_of_wm_ranges(
3134 dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3137 if (dc->debug.sanity_checks)
3138 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Program DRR (dynamic refresh rate) vertical total min/mid/max on each
 * pipe's TG, and arm static-screen control on OTG_TRIG_A when a valid
 * min/max range is set.
 */
3141 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3142 int num_pipes, struct dc_crtc_timing_adjust adjust)
3145 struct drr_params params = {0};
3146 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3147 unsigned int event_triggers = 0x800;
3148 // Note DRR trigger events are generated regardless of whether num frames met.
3149 unsigned int num_frames = 2;
3151 params.vertical_total_max = adjust.v_total_max;
3152 params.vertical_total_min = adjust.v_total_min;
3153 params.vertical_total_mid = adjust.v_total_mid;
3154 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3155 /* TODO: If multiple pipes are to be supported, you need
3156 * some GSL stuff. Static screen triggers may be programmed differently
3159 for (i = 0; i < num_pipes; i++) {
3160 if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3161 if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3162 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3163 pipe_ctx[i]->stream_res.tg, &params);
3164 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3165 if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3166 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3167 pipe_ctx[i]->stream_res.tg,
3168 event_triggers, num_frames);
3173 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3175 struct crtc_position *position)
3179 /* TODO: handle pipes > 1
3181 for (i = 0; i < num_pipes; i++)
3182 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3185 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3186 int num_pipes, const struct dc_static_screen_params *params)
3189 unsigned int triggers = 0;
3191 if (params->triggers.surface_update)
3193 if (params->triggers.cursor_update)
3195 if (params->triggers.force_trigger)
3198 for (i = 0; i < num_pipes; i++)
3199 pipe_ctx[i]->stream_res.tg->funcs->
3200 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3201 triggers, params->num_frames);
3204 static void dcn10_config_stereo_parameters(
3205 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3207 enum view_3d_format view_format = stream->view_format;
3208 enum dc_timing_3d_format timing_3d_format =\
3209 stream->timing.timing_3d_format;
3210 bool non_stereo_timing = false;
3212 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3213 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3214 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3215 non_stereo_timing = true;
3217 if (non_stereo_timing == false &&
3218 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3220 flags->PROGRAM_STEREO = 1;
3221 flags->PROGRAM_POLARITY = 1;
3222 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3223 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3224 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3225 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3226 enum display_dongle_type dongle = \
3227 stream->link->ddc->dongle_type;
3228 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3229 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3230 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3231 flags->DISABLE_STEREO_DP_SYNC = 1;
3233 flags->RIGHT_EYE_POLARITY =\
3234 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3235 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3236 flags->FRAME_PACKED = 1;
3242 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3244 struct crtc_stereo_flags flags = { 0 };
3245 struct dc_stream_state *stream = pipe_ctx->stream;
3247 dcn10_config_stereo_parameters(stream, &flags);
3249 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3250 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3251 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3253 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3256 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3257 pipe_ctx->stream_res.opp,
3258 flags.PROGRAM_STEREO == 1,
3261 pipe_ctx->stream_res.tg->funcs->program_stereo(
3262 pipe_ctx->stream_res.tg,
3269 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3273 for (i = 0; i < res_pool->pipe_count; i++) {
3274 if (res_pool->hubps[i]->inst == mpcc_inst)
3275 return res_pool->hubps[i];
3281 void dcn10_wait_for_mpcc_disconnect(
3283 struct resource_pool *res_pool,
3284 struct pipe_ctx *pipe_ctx)
3286 struct dce_hwseq *hws = dc->hwseq;
3289 if (dc->debug.sanity_checks) {
3290 hws->funcs.verify_allow_pstate_change_high(dc);
3293 if (!pipe_ctx->stream_res.opp)
3296 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3297 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3298 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3300 if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3301 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3302 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3303 hubp->funcs->set_blank(hubp, true);
3307 if (dc->debug.sanity_checks) {
3308 hws->funcs.verify_allow_pstate_change_high(dc);
3313 bool dcn10_dummy_display_power_gating(
3315 uint8_t controller_id,
3316 struct dc_bios *dcb,
3317 enum pipe_gating_control power_gating)
3322 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3324 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3325 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3327 struct dc *dc = pipe_ctx->stream->ctx->dc;
3329 if (plane_state == NULL)
3332 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3333 pipe_ctx->plane_res.hubp);
3335 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3338 plane_state->status.current_address = plane_state->status.requested_address;
3340 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3341 tg->funcs->is_stereo_left_eye) {
3342 plane_state->status.is_right_eye =
3343 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3346 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3347 struct dce_hwseq *hwseq = dc->hwseq;
3348 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3349 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3351 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3352 struct hubbub *hubbub = dc->res_pool->hubbub;
3354 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3355 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3360 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3362 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3364 /* In DCN, this programming sequence is owned by the hubbub */
3365 hubbub->funcs->update_dchub(hubbub, dh_data);
3368 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3370 struct pipe_ctx *test_pipe, *split_pipe;
3371 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3372 struct rect r1 = scl_data->recout, r2, r2_half;
3373 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3374 int cur_layer = pipe_ctx->plane_state->layer_index;
3377 * Disable the cursor if there's another pipe above this with a
3378 * plane that contains this pipe's viewport to prevent double cursor
3379 * and incorrect scaling artifacts.
3381 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3382 test_pipe = test_pipe->top_pipe) {
3383 // Skip invisible layer and pipe-split plane on same layer
3384 if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
3387 r2 = test_pipe->plane_res.scl_data.recout;
3388 r2_r = r2.x + r2.width;
3389 r2_b = r2.y + r2.height;
3390 split_pipe = test_pipe;
3393 * There is another half plane on same layer because of
3394 * pipe-split, merge together per same height.
3396 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3397 split_pipe = split_pipe->top_pipe)
3398 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3399 r2_half = split_pipe->plane_res.scl_data.recout;
3400 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3401 r2.width = r2.width + r2_half.width;
3402 r2_r = r2.x + r2.width;
3406 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3413 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3415 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3416 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3417 struct dpp *dpp = pipe_ctx->plane_res.dpp;
3418 struct dc_cursor_mi_param param = {
3419 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3420 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3421 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3422 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3423 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3424 .rotation = pipe_ctx->plane_state->rotation,
3425 .mirror = pipe_ctx->plane_state->horizontal_mirror
3427 bool pipe_split_on = false;
3428 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3429 (pipe_ctx->prev_odm_pipe != NULL);
3431 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3432 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3433 int x_pos = pos_cpy.x;
3434 int y_pos = pos_cpy.y;
3436 if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3437 if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3438 (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3439 pipe_split_on = true;
3444 * DC cursor is stream space, HW cursor is plane space and drawn
3445 * as part of the framebuffer.
3447 * Cursor position can't be negative, but hotspot can be used to
3448 * shift cursor out of the plane bounds. Hotspot must be smaller
3449 * than the cursor size.
3453 * Translate cursor from stream space to plane space.
3455 * If the cursor is scaled then we need to scale the position
3456 * to be in the approximately correct place. We can't do anything
3457 * about the actual size being incorrect, that's a limitation of
3460 if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3461 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3462 pipe_ctx->plane_state->dst_rect.width;
3463 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3464 pipe_ctx->plane_state->dst_rect.height;
3466 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3467 pipe_ctx->plane_state->dst_rect.width;
3468 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3469 pipe_ctx->plane_state->dst_rect.height;
3473 * If the cursor's source viewport is clipped then we need to
3474 * translate the cursor to appear in the correct position on
3477 * This translation isn't affected by scaling so it needs to be
3478 * done *after* we adjust the position for the scale factor.
3480 * This is only done by opt-in for now since there are still
3481 * some usecases like tiled display that might enable the
3482 * cursor on both streams while expecting dc to clip it.
3484 if (pos_cpy.translate_by_source) {
3485 x_pos += pipe_ctx->plane_state->src_rect.x;
3486 y_pos += pipe_ctx->plane_state->src_rect.y;
3490 * If the position is negative then we need to add to the hotspot
3491 * to shift the cursor outside the plane.
3495 pos_cpy.x_hotspot -= x_pos;
3500 pos_cpy.y_hotspot -= y_pos;
3504 pos_cpy.x = (uint32_t)x_pos;
3505 pos_cpy.y = (uint32_t)y_pos;
3507 if (pipe_ctx->plane_state->address.type
3508 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3509 pos_cpy.enable = false;
3511 if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3512 pos_cpy.enable = false;
3515 if (param.rotation == ROTATION_ANGLE_0) {
3516 int viewport_width =
3517 pipe_ctx->plane_res.scl_data.viewport.width;
3519 pipe_ctx->plane_res.scl_data.viewport.x;
3522 if (pipe_split_on || odm_combine_on) {
3523 if (pos_cpy.x >= viewport_width + viewport_x) {
3524 pos_cpy.x = 2 * viewport_width
3525 - pos_cpy.x + 2 * viewport_x;
3527 uint32_t temp_x = pos_cpy.x;
3529 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3530 if (temp_x >= viewport_x +
3531 (int)hubp->curs_attr.width || pos_cpy.x
3532 <= (int)hubp->curs_attr.width +
3533 pipe_ctx->plane_state->src_rect.x) {
3534 pos_cpy.x = temp_x + viewport_width;
3538 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3542 // Swap axis and mirror horizontally
3543 else if (param.rotation == ROTATION_ANGLE_90) {
3544 uint32_t temp_x = pos_cpy.x;
3546 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3547 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3550 // Swap axis and mirror vertically
3551 else if (param.rotation == ROTATION_ANGLE_270) {
3552 uint32_t temp_y = pos_cpy.y;
3553 int viewport_height =
3554 pipe_ctx->plane_res.scl_data.viewport.height;
3556 pipe_ctx->plane_res.scl_data.viewport.y;
3559 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3560 * For pipe split cases:
3561 * - apply offset of viewport.y to normalize pos_cpy.x
3562 * - calculate the pos_cpy.y as before
3563 * - shift pos_cpy.y back by same offset to get final value
3564 * - since we iterate through both pipes, use the lower
3565 * viewport.y for offset
3566 * For non pipe split cases, use the same calculation for
3567 * pos_cpy.y as the 180 degree rotation case below,
3568 * but use pos_cpy.x as our input because we are rotating
3571 if (pipe_split_on || odm_combine_on) {
3572 int pos_cpy_x_offset;
3573 int other_pipe_viewport_y;
3575 if (pipe_split_on) {
3576 if (pipe_ctx->bottom_pipe) {
3577 other_pipe_viewport_y =
3578 pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3580 other_pipe_viewport_y =
3581 pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3584 if (pipe_ctx->next_odm_pipe) {
3585 other_pipe_viewport_y =
3586 pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3588 other_pipe_viewport_y =
3589 pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3592 pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3593 other_pipe_viewport_y : viewport_y;
3594 pos_cpy.x -= pos_cpy_x_offset;
3595 if (pos_cpy.x > viewport_height) {
3596 pos_cpy.x = pos_cpy.x - viewport_height;
3597 pos_cpy.y = viewport_height - pos_cpy.x;
3599 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3601 pos_cpy.y += pos_cpy_x_offset;
3603 pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3607 // Mirror horizontally and vertically
3608 else if (param.rotation == ROTATION_ANGLE_180) {
3609 int viewport_width =
3610 pipe_ctx->plane_res.scl_data.viewport.width;
3612 pipe_ctx->plane_res.scl_data.viewport.x;
3614 if (!param.mirror) {
3615 if (pipe_split_on || odm_combine_on) {
3616 if (pos_cpy.x >= viewport_width + viewport_x) {
3617 pos_cpy.x = 2 * viewport_width
3618 - pos_cpy.x + 2 * viewport_x;
3620 uint32_t temp_x = pos_cpy.x;
3622 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3623 if (temp_x >= viewport_x +
3624 (int)hubp->curs_attr.width || pos_cpy.x
3625 <= (int)hubp->curs_attr.width +
3626 pipe_ctx->plane_state->src_rect.x) {
3627 pos_cpy.x = temp_x + viewport_width;
3631 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3636 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3638 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3639 * pos_cpy.y_new = viewport.y + delta_from_bottom
3641 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3643 pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3644 pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3647 hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
3648 dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
3651 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3653 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3655 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3656 pipe_ctx->plane_res.hubp, attributes);
3657 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3658 pipe_ctx->plane_res.dpp, attributes);
3661 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3663 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3664 struct fixed31_32 multiplier;
3665 struct dpp_cursor_attributes opt_attr = { 0 };
3666 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3667 struct custom_float_format fmt;
3669 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3672 fmt.exponenta_bits = 5;
3673 fmt.mantissa_bits = 10;
3676 if (sdr_white_level > 80) {
3677 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3678 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3681 opt_attr.scale = hw_scale;
3684 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3685 pipe_ctx->plane_res.dpp, &opt_attr);
3689 * apply_front_porch_workaround TODO FPGA still need?
3691 * This is a workaround for a bug that has existed since R5xx and has not been
3692 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3694 static void apply_front_porch_workaround(
3695 struct dc_crtc_timing *timing)
3697 if (timing->flags.INTERLACE == 1) {
3698 if (timing->v_front_porch < 2)
3699 timing->v_front_porch = 2;
3701 if (timing->v_front_porch < 1)
3702 timing->v_front_porch = 1;
3706 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3708 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3709 struct dc_crtc_timing patched_crtc_timing;
3710 int vesa_sync_start;
3712 int interlace_factor;
3714 patched_crtc_timing = *dc_crtc_timing;
3715 apply_front_porch_workaround(&patched_crtc_timing);
3717 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3719 vesa_sync_start = patched_crtc_timing.v_addressable +
3720 patched_crtc_timing.v_border_bottom +
3721 patched_crtc_timing.v_front_porch;
3723 asic_blank_end = (patched_crtc_timing.v_total -
3725 patched_crtc_timing.v_border_top)
3728 return asic_blank_end -
3729 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3732 void dcn10_calc_vupdate_position(
3734 struct pipe_ctx *pipe_ctx,
3735 uint32_t *start_line,
3738 const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3739 int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3741 if (vupdate_pos >= 0)
3742 *start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
3744 *start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
3745 *end_line = (*start_line + 2) % timing->v_total;
3748 static void dcn10_cal_vline_position(
3750 struct pipe_ctx *pipe_ctx,
3751 uint32_t *start_line,
3754 const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3755 int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
3757 if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
3760 else if (vline_pos < 0)
3763 vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3765 *start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
3767 *start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
3768 *end_line = (*start_line + 2) % timing->v_total;
3769 } else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
3770 // vsync is line 0 so start_line is just the requested line offset
3771 *start_line = vline_pos;
3772 *end_line = (*start_line + 2) % timing->v_total;
3777 void dcn10_setup_periodic_interrupt(
3779 struct pipe_ctx *pipe_ctx)
3781 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3782 uint32_t start_line = 0;
3783 uint32_t end_line = 0;
3785 dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3787 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3790 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3792 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3793 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3795 if (start_line < 0) {
3800 if (tg->funcs->setup_vertical_interrupt2)
3801 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3804 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3805 struct dc_link_settings *link_settings)
3807 struct encoder_unblank_param params = {0};
3808 struct dc_stream_state *stream = pipe_ctx->stream;
3809 struct dc_link *link = stream->link;
3810 struct dce_hwseq *hws = link->dc->hwseq;
3812 /* only 3 items below are used by unblank */
3813 params.timing = pipe_ctx->stream->timing;
3815 params.link_settings.link_rate = link_settings->link_rate;
3817 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3818 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3819 params.timing.pix_clk_100hz /= 2;
3820 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
3823 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3824 hws->funcs.edp_backlight_control(link, true);
3828 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3829 const uint8_t *custom_sdp_message,
3830 unsigned int sdp_message_size)
3832 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3833 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3834 pipe_ctx->stream_res.stream_enc,
3839 enum dc_status dcn10_set_clock(struct dc *dc,
3840 enum dc_clock_type clock_type,
3844 struct dc_state *context = dc->current_state;
3845 struct dc_clock_config clock_cfg = {0};
3846 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3848 if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3849 return DC_FAIL_UNSUPPORTED_1;
3851 dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3852 context, clock_type, &clock_cfg);
3854 if (clk_khz > clock_cfg.max_clock_khz)
3855 return DC_FAIL_CLK_EXCEED_MAX;
3857 if (clk_khz < clock_cfg.min_clock_khz)
3858 return DC_FAIL_CLK_BELOW_MIN;
3860 if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3861 return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3863 /*update internal request clock for update clock use*/
3864 if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3865 current_clocks->dispclk_khz = clk_khz;
3866 else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3867 current_clocks->dppclk_khz = clk_khz;
3869 return DC_ERROR_UNEXPECTED;
3871 if (dc->clk_mgr->funcs->update_clocks)
3872 dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3878 void dcn10_get_clock(struct dc *dc,
3879 enum dc_clock_type clock_type,
3880 struct dc_clock_config *clock_cfg)
3882 struct dc_state *context = dc->current_state;
3884 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3885 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3889 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3891 struct resource_pool *pool = dc->res_pool;
3894 for (i = 0; i < pool->pipe_count; i++) {
3895 struct hubp *hubp = pool->hubps[i];
3896 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3898 hubp->funcs->hubp_read_state(hubp);
3901 dcc_en_bits[i] = s->dcc_en ? 1 : 0;